Version 3.15.12

V8_Fatal now prints a C++ stack trace in debug mode (see the backtrace sketch below).

Added HTML-based tick processor.

Continued implementation of Object.observe (V8 issue 2409).

Fixed V8 issues 2243, 2340, 2393, 2399, 2457.

Fixed Chromium issues 125308, 165637, 166379, 166553.

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@13262 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
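
A minimal sketch (assuming glibc) of the facility the V8_Fatal change builds
on: backtrace()/backtrace_symbols() from <execinfo.h>, which is also why the
build/common.gypi hunk below adds -rdynamic to ldflags. Names here are
illustrative, not V8's actual implementation.

    #include <execinfo.h>
    #include <cstdio>
    #include <cstdlib>

    // Print the current C++ call stack, roughly what a debug-mode fatal
    // handler would do before aborting.
    static void PrintStackTrace() {
      void* frames[64];
      int count = backtrace(frames, 64);                  // capture raw PCs
      char** symbols = backtrace_symbols(frames, count);  // needs -rdynamic
      if (symbols == NULL) return;
      for (int i = 0; i < count; i++) {
        fprintf(stderr, "%s\n", symbols[i]);
      }
      free(symbols);
    }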
diff --git a/ChangeLog b/ChangeLog
index 52601a4..f182f17 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,18 @@
+2012-12-21: Version 3.15.12
+
+        V8_Fatal now prints a C++ stack trace in debug mode.
+
+        Added HTML-based tick processor.
+
+        Continued implementation of Object.observe (V8 issue 2409).
+
+        Fixed V8 issues 2243, 2340, 2393, 2399, 2457.
+
+        Fixed Chromium issues 125308, 165637, 166379, 166553.
+
+        Performance and stability improvements on all platforms.
+
+
 2012-12-10: Version 3.15.11
 
         Define CAN_USE_VFP2/3_INSTRUCTIONS based on arm_neon and arm_fpu GYP
diff --git a/Makefile b/Makefile
index b65ea4c..63cfbf4 100644
--- a/Makefile
+++ b/Makefile
@@ -62,6 +62,12 @@
 ifeq ($(verifyheap), on)
   GYPFLAGS += -Dv8_enable_verify_heap=1
 endif
+# backtrace=off
+ifeq ($(backtrace), off)
+  GYPFLAGS += -Dv8_enable_backtrace=0
+else
+  GYPFLAGS += -Dv8_enable_backtrace=1
+endif
 # snapshot=off
 ifeq ($(snapshot), off)
   GYPFLAGS += -Dv8_use_snapshot='false'
@@ -81,6 +87,12 @@
 ifeq ($(liveobjectlist), on)
   GYPFLAGS += -Dv8_use_liveobjectlist=true
 endif
+# vfp2=off
+ifeq ($(vfp2), off)
+  GYPFLAGS += -Dv8_can_use_vfp2_instructions=false
+else
+  GYPFLAGS += -Dv8_can_use_vfp2_instructions=true
+endif
 # vfp3=off
 ifeq ($(vfp3), off)
   GYPFLAGS += -Dv8_can_use_vfp3_instructions=false
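
With the Makefile hunk above applied, the new knobs are ordinary make
variables, e.g. (the arch.mode target names follow V8's usual Makefile
convention and are shown only as an illustration):

    make arm.release backtrace=off   # -Dv8_enable_backtrace=0, no -rdynamic
    make arm.release vfp2=off        # -Dv8_can_use_vfp2_instructions=false
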
diff --git a/build/common.gypi b/build/common.gypi
index e68ee15..0144435 100644
--- a/build/common.gypi
+++ b/build/common.gypi
@@ -68,6 +68,8 @@
 
     'v8_enable_debugger_support%': 1,
 
+    'v8_enable_backtrace%': 0,
+
     'v8_enable_disassembler%': 0,
 
     'v8_enable_gdbjit%': 0,
@@ -160,7 +162,7 @@
           [ 'v8_use_arm_eabi_hardfloat=="true"', {
             'defines': [
               'USE_EABI_HARDFLOAT=1',
-              'CAN_USE_VFP3_INSTRUCTIONS',
+              'CAN_USE_VFP2_INSTRUCTIONS',
             ],
             'target_conditions': [
               ['_toolset=="target"', {
@@ -368,6 +370,10 @@
             'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
                         '-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
           }],
+          ['OS=="linux" and v8_enable_backtrace==1', {
+            # Support for backtrace_symbols.
+            'ldflags': [ '-rdynamic' ],
+          }],
           ['OS=="android"', {
             'variables': {
               'android_full_debug%': 1,
diff --git a/build/standalone.gypi b/build/standalone.gypi
index 7145a16..29344c7 100644
--- a/build/standalone.gypi
+++ b/build/standalone.gypi
@@ -32,6 +32,7 @@
     'library%': 'static_library',
     'component%': 'static_library',
     'visibility%': 'hidden',
+    'v8_enable_backtrace%': 0,
     'msvs_multi_core_compile%': '1',
     'mac_deployment_target%': '10.5',
     'variables': {
@@ -100,7 +101,7 @@
           [ 'OS=="linux"', {
             'cflags': [ '-ansi' ],
           }],
-          [ 'visibility=="hidden"', {
+          [ 'visibility=="hidden" and v8_enable_backtrace==0', {
             'cflags': [ '-fvisibility=hidden' ],
           }],
           [ 'component=="shared_library"', {
diff --git a/include/v8.h b/include/v8.h
index f577e93..b35500d 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -1253,12 +1253,8 @@
 
   /**
    * Allocates a new string from either UTF-8 encoded or ASCII data.
-   * The second parameter 'length' gives the buffer length.
-   * If the data is UTF-8 encoded, the caller must
-   * be careful to supply the length parameter.
-   * If it is not given, the function calls
-   * 'strlen' to determine the buffer length, it might be
-   * wrong if 'data' contains a null character.
+   * The second parameter 'length' gives the buffer length. If omitted,
+   * the function calls 'strlen' to determine the buffer length.
    */
   V8EXPORT static Local<String> New(const char* data, int length = -1);
 
@@ -2306,7 +2302,8 @@
   static Local<FunctionTemplate> New(
       InvocationCallback callback = 0,
       Handle<Value> data = Handle<Value>(),
-      Handle<Signature> signature = Handle<Signature>());
+      Handle<Signature> signature = Handle<Signature>(),
+      int length = 0);
   /** Returns the unique function instance in the current execution context.*/
   Local<Function> GetFunction();
 
@@ -2318,6 +2315,9 @@
   void SetCallHandler(InvocationCallback callback,
                       Handle<Value> data = Handle<Value>());
 
+  /** Set the predefined length property for the FunctionTemplate. */
+  void SetLength(int length);
+
   /** Get the InstanceTemplate. */
   Local<ObjectTemplate> InstanceTemplate();
 
@@ -2330,7 +2330,6 @@
    */
   Local<ObjectTemplate> PrototypeTemplate();
 
-
   /**
    * Set the class name of the FunctionTemplate.  This is used for
    * printing objects created with the function created from the
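
A hedged usage sketch of the optional 'length' argument and SetLength() added
above (MyCallback is a hypothetical InvocationCallback; the code assumes the
usual 2012-era HandleScope setup). Per the header comment, this predefines the
'length' property of functions created from the template:

    v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(
        MyCallback,
        v8::Handle<v8::Value>(),      // no data
        v8::Handle<v8::Signature>(),  // no signature
        2);                           // created functions get length == 2
    // Equivalently, after construction:
    t->SetLength(2);
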
diff --git a/src/api.cc b/src/api.cc
index 95e5340..11d0a40 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -1053,7 +1053,7 @@
 
 
 Local<FunctionTemplate> FunctionTemplate::New(InvocationCallback callback,
-    v8::Handle<Value> data, v8::Handle<Signature> signature) {
+    v8::Handle<Value> data, v8::Handle<Signature> signature, int length) {
   i::Isolate* isolate = i::Isolate::Current();
   EnsureInitializedForIsolate(isolate, "v8::FunctionTemplate::New()");
   LOG_API(isolate, "FunctionTemplate::New");
@@ -1070,6 +1070,7 @@
     if (data.IsEmpty()) data = v8::Undefined();
     Utils::ToLocal(obj)->SetCallHandler(callback, data);
   }
+  obj->set_length(length);
   obj->set_undetectable(false);
   obj->set_needs_access_check(false);
 
@@ -1240,6 +1241,14 @@
 }
 
 
+void FunctionTemplate::SetLength(int length) {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetLength()")) return;
+  ENTER_V8(isolate);
+  Utils::OpenHandle(this)->set_length(length);
+}
+
+
 void FunctionTemplate::SetClassName(Handle<String> name) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetClassName()")) return;
@@ -1842,7 +1851,7 @@
     if (!raw_obj->IsJSObject()) return v8::Local<Value>();
     i::HandleScope scope(isolate_);
     i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_);
-    i::Handle<i::String> name = isolate_->factory()->LookupAsciiSymbol("stack");
+    i::Handle<i::String> name = isolate_->factory()->stack_symbol();
     if (!obj->HasProperty(*name)) return v8::Local<Value>();
     i::Handle<i::Object> value = i::GetProperty(obj, name);
     if (value.is_null()) return v8::Local<Value>();
@@ -1953,7 +1962,7 @@
                                                i::Handle<i::Object> argv[],
                                                bool* has_pending_exception) {
   i::Isolate* isolate = i::Isolate::Current();
-  i::Handle<i::String> fmt_str = isolate->factory()->LookupAsciiSymbol(name);
+  i::Handle<i::String> fmt_str = isolate->factory()->LookupUtf8Symbol(name);
   i::Object* object_fun =
       isolate->js_builtins_object()->GetPropertyNoExceptionThrown(*fmt_str);
   i::Handle<i::JSFunction> fun =
@@ -2369,7 +2378,7 @@
 static i::Object* LookupBuiltin(i::Isolate* isolate,
                                 const char* builtin_name) {
   i::Handle<i::String> symbol =
-      isolate->factory()->LookupAsciiSymbol(builtin_name);
+      isolate->factory()->LookupUtf8Symbol(builtin_name);
   i::Handle<i::JSBuiltinsObject> builtins = isolate->js_builtins_object();
   return builtins->GetPropertyNoExceptionThrown(*symbol);
 }
@@ -5129,8 +5138,8 @@
 
   i::HandleScope scope(isolate);
   // Get the function ResetDateCache (defined in date.js).
-  i::Handle<i::String> func_name_str =
-      isolate->factory()->LookupAsciiSymbol("ResetDateCache");
+  i::Handle<i::String> func_name_str = isolate->factory()->LookupOneByteSymbol(
+      STATIC_ASCII_VECTOR("ResetDateCache"));
   i::MaybeObject* result =
       isolate->js_builtins_object()->GetProperty(*func_name_str);
   i::Object* object_func;
@@ -5160,7 +5169,7 @@
   if ((flags & RegExp::kMultiline) != 0) flags_buf[num_flags++] = 'm';
   if ((flags & RegExp::kIgnoreCase) != 0) flags_buf[num_flags++] = 'i';
   ASSERT(num_flags <= static_cast<int>(ARRAY_SIZE(flags_buf)));
-  return FACTORY->LookupSymbol(
+  return FACTORY->LookupOneByteSymbol(
       i::Vector<const char>(flags_buf, num_flags));
 }
 
@@ -5265,8 +5274,8 @@
   LOG_API(isolate, "String::NewSymbol(char)");
   ENTER_V8(isolate);
   if (length == -1) length = i::StrLength(data);
-  i::Handle<i::String> result =
-      isolate->factory()->LookupSymbol(i::Vector<const char>(data, length));
+  i::Handle<i::String> result = isolate->factory()->LookupUtf8Symbol(
+      i::Vector<const char>(data, length));
   return Utils::ToLocal(result);
 }
 
@@ -5960,8 +5969,8 @@
   i::Debug* isolate_debug = isolate->debug();
   isolate_debug->Load();
   i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object());
-  i::Handle<i::String> name =
-      isolate->factory()->LookupAsciiSymbol("MakeMirror");
+  i::Handle<i::String> name = isolate->factory()->LookupOneByteSymbol(
+      STATIC_ASCII_VECTOR("MakeMirror"));
   i::Handle<i::Object> fun_obj = i::GetProperty(debug, name);
   i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(fun_obj);
   v8::Handle<v8::Function> v8_fun = Utils::ToLocal(fun);
@@ -6023,11 +6032,11 @@
   const i::CodeEntry* entry = node->entry();
   if (!entry->has_name_prefix()) {
     return Handle<String>(ToApi<String>(
-        isolate->factory()->LookupAsciiSymbol(entry->name())));
+        isolate->factory()->LookupUtf8Symbol(entry->name())));
   } else {
     return Handle<String>(ToApi<String>(isolate->factory()->NewConsString(
-        isolate->factory()->LookupAsciiSymbol(entry->name_prefix()),
-        isolate->factory()->LookupAsciiSymbol(entry->name()))));
+        isolate->factory()->LookupUtf8Symbol(entry->name_prefix()),
+        isolate->factory()->LookupUtf8Symbol(entry->name()))));
   }
 }
 
@@ -6036,7 +6045,7 @@
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::CpuProfileNode::GetScriptResourceName");
   const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
-  return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
+  return Handle<String>(ToApi<String>(isolate->factory()->LookupUtf8Symbol(
       node->entry()->resource_name())));
 }
 
@@ -6122,7 +6131,7 @@
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::CpuProfile::GetTitle");
   const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
-  return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
+  return Handle<String>(ToApi<String>(isolate->factory()->LookupUtf8Symbol(
       profile->title())));
 }
 
@@ -6219,7 +6228,7 @@
     case i::HeapGraphEdge::kInternal:
     case i::HeapGraphEdge::kProperty:
     case i::HeapGraphEdge::kShortcut:
-      return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
+      return Handle<String>(ToApi<String>(isolate->factory()->LookupUtf8Symbol(
           edge->name())));
     case i::HeapGraphEdge::kElement:
     case i::HeapGraphEdge::kHidden:
@@ -6263,7 +6272,7 @@
 Handle<String> HeapGraphNode::GetName() const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapGraphNode::GetName");
-  return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
+  return Handle<String>(ToApi<String>(isolate->factory()->LookupUtf8Symbol(
       ToInternal(this)->name())));
 }
 
@@ -6342,7 +6351,7 @@
 Handle<String> HeapSnapshot::GetTitle() const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetTitle");
-  return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
+  return Handle<String>(ToApi<String>(isolate->factory()->LookupUtf8Symbol(
       ToInternal(this)->title())));
 }
 
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index acd61fe..a31b7e9 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -47,6 +47,33 @@
 namespace internal {
 
 
+int Register::NumAllocatableRegisters() {
+  if (CpuFeatures::IsSupported(VFP2)) {
+    return kMaxNumAllocatableRegisters;
+  } else {
+    return kMaxNumAllocatableRegisters - kGPRsPerNonVFP2Double;
+  }
+}
+
+
+int DwVfpRegister::NumRegisters() {
+  if (CpuFeatures::IsSupported(VFP2)) {
+    return DwVfpRegister::kNumRegisters;
+  } else {
+    return 1;
+  }
+}
+
+
+int DwVfpRegister::NumAllocatableRegisters() {
+  if (CpuFeatures::IsSupported(VFP2)) {
+    return DwVfpRegister::kMaxNumAllocatableRegisters;
+  } else {
+    return 1;
+  }
+}
+
+
 int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
   ASSERT(!reg.is(kDoubleRegZero));
   ASSERT(!reg.is(kScratchDoubleReg));
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 47ea0e2..52edb39 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -85,6 +85,33 @@
 }
 
 
+const char* DwVfpRegister::AllocationIndexToString(int index) {
+  if (CpuFeatures::IsSupported(VFP2)) {
+    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    const char* const names[] = {
+      "d0",
+      "d1",
+      "d2",
+      "d3",
+      "d4",
+      "d5",
+      "d6",
+      "d7",
+      "d8",
+      "d9",
+      "d10",
+      "d11",
+      "d12",
+      "d13"
+    };
+    return names[index];
+  } else {
+    ASSERT(index == 0);
+    return "sfpd0";
+  }
+}
+
+
 void CpuFeatures::Probe() {
   unsigned standard_features = static_cast<unsigned>(
       OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 3b9bb80..a361c7e 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -71,21 +71,24 @@
 // Core register
 struct Register {
   static const int kNumRegisters = 16;
-  static const int kNumAllocatableRegisters = 8;
+  static const int kMaxNumAllocatableRegisters = 8;
   static const int kSizeInBytes = 4;
+  static const int kGPRsPerNonVFP2Double = 2;
+
+  inline static int NumAllocatableRegisters();
 
   static int ToAllocationIndex(Register reg) {
-    ASSERT(reg.code() < kNumAllocatableRegisters);
+    ASSERT(reg.code() < kMaxNumAllocatableRegisters);
     return reg.code();
   }
 
   static Register FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
     return from_code(index);
   }
 
   static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
     const char* const names[] = {
       "r0",
       "r1",
@@ -165,7 +168,6 @@
 const Register lr  = { kRegister_lr_Code };
 const Register pc  = { kRegister_pc_Code };
 
-
 // Single word VFP register.
 struct SwVfpRegister {
   bool is_valid() const { return 0 <= code_ && code_ < 32; }
@@ -196,37 +198,19 @@
   //  d14: 0.0
   //  d15: scratch register.
   static const int kNumReservedRegisters = 2;
-  static const int kNumAllocatableRegisters = kNumRegisters -
+  static const int kMaxNumAllocatableRegisters = kNumRegisters -
       kNumReservedRegisters;
 
+  inline static int NumRegisters();
+  inline static int NumAllocatableRegisters();
   inline static int ToAllocationIndex(DwVfpRegister reg);
+  static const char* AllocationIndexToString(int index);
 
   static DwVfpRegister FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
     return from_code(index);
   }
 
-  static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
-    const char* const names[] = {
-      "d0",
-      "d1",
-      "d2",
-      "d3",
-      "d4",
-      "d5",
-      "d6",
-      "d7",
-      "d8",
-      "d9",
-      "d10",
-      "d11",
-      "d12",
-      "d13"
-    };
-    return names[index];
-  }
-
   static DwVfpRegister from_code(int code) {
     DwVfpRegister r = { code };
     return r;
@@ -323,6 +307,9 @@
 const DwVfpRegister d14 = { 14 };
 const DwVfpRegister d15 = { 15 };
 
+const Register sfpd_lo  = { kRegister_r6_Code };
+const Register sfpd_hi  = { kRegister_r7_Code };
+
 // Aliases for double registers.  Defined using #define instead of
 // "static const DwVfpRegister&" because Clang complains otherwise when a
 // compilation unit that includes this header doesn't use the variables.
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 24d14e8..28e00dd 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -1259,6 +1259,26 @@
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
 
 
+void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across the notification; this is important for
+    // compiled stubs that tail call the runtime on deopts, passing their
+    // parameters in registers.
+    __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
+    // Pass the function and deoptimization type to the runtime system.
+    __ CallRuntime(Runtime::kNotifyICMiss, 0);
+    __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
+  }
+
+  __ mov(ip, lr);  // Stash the miss continuation.
+  __ add(sp, sp, Operand(kPointerSize));  // Ignore state.
+  __ pop(lr);  // Restore LR to continuation in JSFunction.
+  __ mov(pc, ip);  // Jump to miss handler.
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 9484f85..d8cbd08 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -37,6 +37,17 @@
 namespace internal {
 
 
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { r1, r0 };
+  descriptor->register_param_count_ = 2;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      isolate->builtins()->KeyedLoadIC_Miss();
+}
+
+
 #define __ ACCESS_MASM(masm)
 
 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
@@ -503,7 +514,7 @@
 // 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
 // scratch register.  Destroys the source register.  No GC occurs during this
 // stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public CodeStub {
+class ConvertToDoubleStub : public PlatformCodeStub {
  public:
   ConvertToDoubleStub(Register result_reg_1,
                       Register result_reg_2,
@@ -2430,33 +2441,112 @@
       // We fall through here if we multiplied a negative number with 0, because
       // that would mean we should produce -0.
       break;
-    case Token::DIV:
+    case Token::DIV: {
+      Label div_with_sdiv;
+
+      // Check for 0 divisor.
+      __ cmp(right, Operand(0));
+      __ b(eq, &not_smi_result);
+
       // Check for power of two on the right hand side.
-      __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
-      // Check for positive and no remainder (scratch1 contains right - 1).
-      __ orr(scratch2, scratch1, Operand(0x80000000u));
-      __ tst(left, scratch2);
-      __ b(ne, &not_smi_result);
+      __ sub(scratch1, right, Operand(1));
+      __ tst(scratch1, right);
+      if (CpuFeatures::IsSupported(SUDIV)) {
+        __ b(ne, &div_with_sdiv);
+        // Check for no remainder.
+        __ tst(left, scratch1);
+        __ b(ne, &not_smi_result);
+        // Check for positive left hand side.
+        __ cmp(left, Operand(0));
+        __ b(mi, &div_with_sdiv);
+      } else {
+        __ b(ne, &not_smi_result);
+        // Check for positive and no remainder.
+        __ orr(scratch2, scratch1, Operand(0x80000000u));
+        __ tst(left, scratch2);
+        __ b(ne, &not_smi_result);
+      }
 
       // Perform division by shifting.
       __ CountLeadingZeros(scratch1, scratch1, scratch2);
       __ rsb(scratch1, scratch1, Operand(31));
       __ mov(right, Operand(left, LSR, scratch1));
       __ Ret();
+
+      if (CpuFeatures::IsSupported(SUDIV)) {
+        Label result_not_zero;
+
+        __ bind(&div_with_sdiv);
+        // Do division.
+        __ sdiv(scratch1, left, right);
+        // Check that the remainder is zero.
+        __ mls(scratch2, scratch1, right, left);
+        __ cmp(scratch2, Operand(0));
+        __ b(ne, &not_smi_result);
+        // Check for negative zero result.
+        __ cmp(scratch1, Operand(0));
+        __ b(ne, &result_not_zero);
+        __ cmp(right, Operand(0));
+        __ b(lt, &not_smi_result);
+        __ bind(&result_not_zero);
+        // Check for the corner case of dividing the most negative smi by -1.
+        __ cmp(scratch1, Operand(0x40000000));
+        __ b(eq, &not_smi_result);
+        // Tag and return the result.
+        __ SmiTag(right, scratch1);
+        __ Ret();
+      }
       break;
-    case Token::MOD:
-      // Check for two positive smis.
-      __ orr(scratch1, left, Operand(right));
-      __ tst(scratch1, Operand(0x80000000u | kSmiTagMask));
-      __ b(ne, &not_smi_result);
+    }
+    case Token::MOD: {
+      Label modulo_with_sdiv;
 
-      // Check for power of two on the right hand side.
-      __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
+      if (CpuFeatures::IsSupported(SUDIV)) {
+        // Check for x % 0.
+        __ cmp(right, Operand(0));
+        __ b(eq, &not_smi_result);
 
-      // Perform modulus by masking.
+        // Check for two positive smis.
+        __ orr(scratch1, left, Operand(right));
+        __ tst(scratch1, Operand(0x80000000u));
+        __ b(ne, &modulo_with_sdiv);
+
+        // Check for power of two on the right hand side.
+        __ sub(scratch1, right, Operand(1));
+        __ tst(scratch1, right);
+        __ b(ne, &modulo_with_sdiv);
+      } else {
+        // Check for two positive smis.
+        __ orr(scratch1, left, Operand(right));
+        __ tst(scratch1, Operand(0x80000000u));
+        __ b(ne, &not_smi_result);
+
+        // Check for power of two on the right hand side.
+        __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
+      }
+
+      // Perform modulus by masking (scratch1 contains right - 1).
       __ and_(right, left, Operand(scratch1));
       __ Ret();
+
+      if (CpuFeatures::IsSupported(SUDIV)) {
+        __ bind(&modulo_with_sdiv);
+        __ mov(scratch2, right);
+        // Perform modulus with sdiv and mls.
+        __ sdiv(scratch1, left, right);
+        __ mls(right, scratch1, right, left);
+        // Return if the result is not 0.
+        __ cmp(right, Operand(0));
+        __ Ret(ne);
+        // The result is 0, check for -0 case.
+        __ cmp(left, Operand(0));
+        __ Ret(pl);
+        // This is a -0 case, restore the value of right.
+        __ mov(right, scratch2);
+        // We fall through here to not_smi_result to produce -0.
+      }
       break;
+    }
     case Token::BIT_OR:
       __ orr(right, left, Operand(right));
       __ Ret();
@@ -3568,10 +3658,10 @@
   const Register exponent = r2;
   const Register heapnumbermap = r5;
   const Register heapnumber = r0;
-  const DoubleRegister double_base = d1;
-  const DoubleRegister double_exponent = d2;
-  const DoubleRegister double_result = d3;
-  const DoubleRegister double_scratch = d0;
+  const DwVfpRegister double_base = d1;
+  const DwVfpRegister double_exponent = d2;
+  const DwVfpRegister double_result = d3;
+  const DwVfpRegister double_scratch = d0;
   const SwVfpRegister single_scratch = s0;
   const Register scratch = r9;
   const Register scratch2 = r7;
@@ -3781,12 +3871,29 @@
 
 
 void CodeStub::GenerateFPStubs() {
-  CEntryStub save_doubles(1, kSaveFPRegs);
-  Handle<Code> code = save_doubles.GetCode();
-  code->set_is_pregenerated(true);
-  StoreBufferOverflowStub stub(kSaveFPRegs);
-  stub.GetCode()->set_is_pregenerated(true);
-  code->GetIsolate()->set_fp_stubs_generated(true);
+  SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
+      ? kSaveFPRegs
+      : kDontSaveFPRegs;
+  CEntryStub save_doubles(1, mode);
+  StoreBufferOverflowStub stub(mode);
+  // These stubs might already be in the snapshot; detect that and don't
+  // regenerate them, since regenerating would corrupt the code stub
+  // initialization state.
+  Code* save_doubles_code = NULL;
+  Code* store_buffer_overflow_code = NULL;
+  if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
+    if (CpuFeatures::IsSupported(VFP2)) {
+      CpuFeatures::Scope scope2(VFP2);
+      save_doubles_code = *save_doubles.GetCode();
+      store_buffer_overflow_code = *stub.GetCode();
+    } else {
+      save_doubles_code = *save_doubles.GetCode();
+      store_buffer_overflow_code = *stub.GetCode();
+    }
+    save_doubles_code->set_is_pregenerated(true);
+    store_buffer_overflow_code->set_is_pregenerated(true);
+  }
+  ISOLATE->set_fp_stubs_generated(true);
 }
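
The Token::DIV smi fast path above divides by shifting when the divisor is a
power of two. A minimal C++ rendering of the emitted checks (a sketch assuming
V8's smi tagging, value << 1, with GCC's CLZ builtin standing in for the ARM
CountLeadingZeros/rsb pair):

    #include <cstdint>

    // left/right are valid tagged smis (always even). Returns true and
    // writes the tagged quotient when the stub's fast-path checks pass:
    // nonzero power-of-two divisor, non-negative dividend, zero remainder.
    static bool SmiDivByShift(int32_t left, int32_t right, int32_t* result) {
      if (right == 0) return false;           // 0 divisor -> not_smi_result
      int32_t mask = right - 1;               // scratch1 in the stub
      if ((right & mask) != 0) return false;  // not a power of two
      if (left < 0 || (left & mask) != 0) return false;  // sign or remainder
      int shift = 31 - __builtin_clz(static_cast<uint32_t>(mask));
      *result = left >> shift;                // quotient, still smi-tagged
      return true;
    }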
 
 
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 0443cf7..6f964a8 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -36,7 +36,7 @@
 
 // Compute a transcendental math function natively, or call the
 // TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
  public:
   enum ArgumentType {
     TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
@@ -58,7 +58,7 @@
 };
 
 
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
  public:
   explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
       : save_doubles_(save_fp) { }
@@ -77,7 +77,7 @@
 };
 
 
-class UnaryOpStub: public CodeStub {
+class UnaryOpStub: public PlatformCodeStub {
  public:
   UnaryOpStub(Token::Value op,
               UnaryOverwriteMode mode,
@@ -219,7 +219,7 @@
 };
 
 
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
  public:
   explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
 
@@ -242,7 +242,7 @@
 };
 
 
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
  public:
   SubStringStub() {}
 
@@ -255,7 +255,7 @@
 
 
 
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
  public:
   StringCompareStub() { }
 
@@ -295,7 +295,7 @@
 // This stub can convert a signed int32 to a heap number (double).  It does
 // not work for int32s that are in Smi range!  No GC occurs during this stub
 // so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public CodeStub {
+class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
  public:
   WriteInt32ToHeapNumberStub(Register the_int,
                              Register the_heap_number,
@@ -329,7 +329,7 @@
 };
 
 
-class NumberToStringStub: public CodeStub {
+class NumberToStringStub: public PlatformCodeStub {
  public:
   NumberToStringStub() { }
 
@@ -355,7 +355,7 @@
 };
 
 
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
  public:
   RecordWriteStub(Register object,
                   Register value,
@@ -511,7 +511,7 @@
     Register GetRegThatIsNotOneOf(Register r1,
                                   Register r2,
                                   Register r3) {
-      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+      for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
         Register candidate = Register::FromAllocationIndex(i);
         if (candidate.is(r1)) continue;
         if (candidate.is(r2)) continue;
@@ -570,7 +570,7 @@
 // Enter C code from generated RegExp code in a way that allows
 // the C code to fix the return address in case of a GC.
 // Currently only needed on ARM.
-class RegExpCEntryStub: public CodeStub {
+class RegExpCEntryStub: public PlatformCodeStub {
  public:
   RegExpCEntryStub() {}
   virtual ~RegExpCEntryStub() {}
@@ -589,7 +589,7 @@
 // keep the code which called into native pinned in the memory. Currently the
 // simplest approach is to generate such stub early enough so it can never be
 // moved by GC
-class DirectCEntryStub: public CodeStub {
+class DirectCEntryStub: public PlatformCodeStub {
  public:
   DirectCEntryStub() {}
   void Generate(MacroAssembler* masm);
@@ -739,7 +739,7 @@
 };
 
 
-class StringDictionaryLookupStub: public CodeStub {
+class StringDictionaryLookupStub: public PlatformCodeStub {
  public:
   enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
 
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index bb771b1..5e8739c 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -73,10 +73,10 @@
 
   {
     CpuFeatures::Scope use_vfp(VFP2);
-    DoubleRegister input = d0;
-    DoubleRegister result = d1;
-    DoubleRegister double_scratch1 = d2;
-    DoubleRegister double_scratch2 = d3;
+    DwVfpRegister input = d0;
+    DwVfpRegister result = d1;
+    DwVfpRegister double_scratch1 = d2;
+    DwVfpRegister double_scratch2 = d3;
     Register temp1 = r4;
     Register temp2 = r5;
     Register temp3 = r6;
@@ -571,10 +571,10 @@
 
 
 void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
-                                   DoubleRegister input,
-                                   DoubleRegister result,
-                                   DoubleRegister double_scratch1,
-                                   DoubleRegister double_scratch2,
+                                   DwVfpRegister input,
+                                   DwVfpRegister result,
+                                   DwVfpRegister double_scratch1,
+                                   DwVfpRegister double_scratch2,
                                    Register temp1,
                                    Register temp2,
                                    Register temp3) {
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 8f0033e..75899a9 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -44,6 +44,10 @@
 
 class CodeGenerator: public AstVisitor {
  public:
+  CodeGenerator() {
+    InitializeAstVisitor();
+  }
+
   static bool MakeCode(CompilationInfo* info);
 
   // Printing of AST, etc. as requested by flags.
@@ -68,6 +72,8 @@
                               int pos,
                               bool right_here = false);
 
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
  private:
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
@@ -92,10 +98,10 @@
 class MathExpGenerator : public AllStatic {
  public:
   static void EmitMathExp(MacroAssembler* masm,
-                          DoubleRegister input,
-                          DoubleRegister result,
-                          DoubleRegister double_scratch1,
-                          DoubleRegister double_scratch2,
+                          DwVfpRegister input,
+                          DwVfpRegister result,
+                          DwVfpRegister double_scratch1,
+                          DwVfpRegister double_scratch2,
                           Register temp1,
                           Register temp2,
                           Register temp3);
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index ee2a581..a263cb7 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -44,11 +44,14 @@
 }
 
 
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
-  HandleScope scope;
+void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
+    JSFunction* function) {
+  Isolate* isolate = function->GetIsolate();
+  HandleScope scope(isolate);
   AssertNoAllocation no_allocation;
 
-  if (!function->IsOptimized()) return;
+  ASSERT(function->IsOptimized());
+  ASSERT(function->FunctionsInFunctionListShareSameCode());
 
   // The optimized code is going to be patched, so we cannot use it
   // any more.  Play safe and reset the whole cache.
@@ -91,8 +94,6 @@
 #endif
   }
 
-  Isolate* isolate = code->GetIsolate();
-
   // Add the deoptimizing code to the list.
   DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
   DeoptimizerData* data = isolate->deoptimizer_data();
@@ -212,7 +213,7 @@
 
 void Deoptimizer::DoComputeOsrOutputFrame() {
   DeoptimizationInputData* data = DeoptimizationInputData::cast(
-      optimized_code_->deoptimization_data());
+      compiled_code_->deoptimization_data());
   unsigned ast_id = data->OsrAstId()->value();
 
   int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
@@ -246,7 +247,7 @@
   unsigned input_frame_size = input_->GetFrameSize();
   ASSERT(fixed_size + height_in_bytes == input_frame_size);
 
-  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+  unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
   unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
   unsigned outgoing_size = outgoing_height * kPointerSize;
   unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@@ -338,7 +339,7 @@
 
     unsigned pc_offset = data->OsrPcOffset()->value();
     uint32_t pc = reinterpret_cast<uint32_t>(
-        optimized_code_->entry() + pc_offset);
+        compiled_code_->entry() + pc_offset);
     output_[0]->SetPc(pc);
   }
   Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
@@ -451,6 +452,70 @@
 }
 
 
+void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
+                                      int frame_index) {
+  //
+  //               FROM                                  TO             <-fp
+  //    |          ....           |          |          ....           |
+  //    +-------------------------+          +-------------------------+
+  //    | JSFunction continuation |          | JSFunction continuation |
+  //    +-------------------------+          +-------------------------+<-sp
+  // |  |   saved frame (fp)      |
+  // |  +=========================+<-fp
+  // |  |   JSFunction context    |
+  // v  +-------------------------+
+  //    |   COMPILED_STUB marker  |          fp = saved frame
+  //    +-------------------------+          f8 = JSFunction context
+  //    |                         |
+  //    | ...                     |
+  //    |                         |
+  //    +-------------------------+<-sp
+  //
+  //
+  int output_frame_size = 1 * kPointerSize;
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, 0);
+  Code* notify_miss =
+      isolate_->builtins()->builtin(Builtins::kNotifyICMiss);
+  output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+  output_frame->SetContinuation(
+      reinterpret_cast<intptr_t>(notify_miss->entry()));
+
+  ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
+  int major_key = compiled_code_->major_key();
+  CodeStubInterfaceDescriptor* descriptor =
+      isolate_->code_stub_interface_descriptor(major_key);
+  Handle<Code> miss_ic(descriptor->deoptimization_handler_);
+  output_frame->SetPc(reinterpret_cast<intptr_t>(miss_ic->instruction_start()));
+  unsigned input_frame_size = input_->GetFrameSize();
+  intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
+  output_frame->SetFrameSlot(0, value);
+  value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
+  output_frame->SetRegister(fp.code(), value);
+  output_frame->SetFp(value);
+  value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
+  output_frame->SetRegister(cp.code(), value);
+
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  ASSERT(opcode == Translation::REGISTER);
+  USE(opcode);
+  int input_reg = iterator->Next();
+  intptr_t input_value = input_->GetRegister(input_reg);
+  output_frame->SetRegister(r1.code(), input_value);
+
+  int32_t next = iterator->Next();
+  opcode = static_cast<Translation::Opcode>(next);
+  ASSERT(opcode == Translation::REGISTER);
+  input_reg = iterator->Next();
+  input_value = input_->GetRegister(input_reg);
+  output_frame->SetRegister(r0.code(), input_value);
+
+  ASSERT(frame_index == 0);
+  output_[frame_index] = output_frame;
+}
+
+
 void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
                                               int frame_index) {
   Builtins* builtins = isolate_->builtins();
@@ -878,7 +943,7 @@
   }
   input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
   input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
     input_->SetDoubleRegister(i, 0.0);
   }
 
@@ -898,7 +963,6 @@
 
   Isolate* isolate = masm()->isolate();
 
-  CpuFeatures::Scope scope(VFP3);
   // Save all general purpose registers before messing with them.
   const int kNumberOfRegisters = Register::kNumRegisters;
 
@@ -906,23 +970,29 @@
   RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
 
   const int kDoubleRegsSize =
-      kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;
+      kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;
 
-  // Save all VFP registers before messing with them.
-  DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
-  DwVfpRegister last =
-      DwVfpRegister::FromAllocationIndex(
-          DwVfpRegister::kNumAllocatableRegisters - 1);
-  ASSERT(last.code() > first.code());
-  ASSERT((last.code() - first.code()) ==
-      (DwVfpRegister::kNumAllocatableRegisters - 1));
+  if (CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
+    // Save all VFP registers before messing with them.
+    DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
+    DwVfpRegister last =
+        DwVfpRegister::FromAllocationIndex(
+            DwVfpRegister::kMaxNumAllocatableRegisters - 1);
+    ASSERT(last.code() > first.code());
+    ASSERT((last.code() - first.code()) ==
+           (DwVfpRegister::kMaxNumAllocatableRegisters - 1));
 #ifdef DEBUG
-  for (int i = 0; i <= (DwVfpRegister::kNumAllocatableRegisters - 1); i++) {
-    ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
-           (DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
-  }
+    int max = DwVfpRegister::kMaxNumAllocatableRegisters - 1;
+    for (int i = 0; i <= max; i++) {
+      ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
+             (DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
+    }
 #endif
-  __ vstm(db_w, sp, first, last);
+    __ vstm(db_w, sp, first, last);
+  } else {
+    __ sub(sp, sp, Operand(kDoubleRegsSize));
+  }
 
   // Push all 16 registers (needed to populate FrameDescription::registers_).
   // TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
@@ -981,14 +1051,17 @@
     __ str(r2, MemOperand(r1, offset));
   }
 
-  // Copy VFP registers to
-  // double_registers_[DoubleRegister::kNumAllocatableRegisters]
-  int double_regs_offset = FrameDescription::double_registers_offset();
-  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
-    int dst_offset = i * kDoubleSize + double_regs_offset;
-    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
-    __ vldr(d0, sp, src_offset);
-    __ vstr(d0, r1, dst_offset);
+  if (CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
+    // Copy VFP registers to
+    // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
+    int double_regs_offset = FrameDescription::double_registers_offset();
+    for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); ++i) {
+      int dst_offset = i * kDoubleSize + double_regs_offset;
+      int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+      __ vldr(d0, sp, src_offset);
+      __ vstr(d0, r1, dst_offset);
+    }
   }
 
   // Remove the bailout id, eventually return address, and the saved registers
@@ -1009,10 +1082,13 @@
   // frame description.
   __ add(r3,  r1, Operand(FrameDescription::frame_content_offset()));
   Label pop_loop;
+  Label pop_loop_header;
+  __ b(&pop_loop_header);
   __ bind(&pop_loop);
   __ pop(r4);
   __ str(r4, MemOperand(r3, 0));
   __ add(r3, r3, Operand(sizeof(uint32_t)));
+  __ bind(&pop_loop_header);
   __ cmp(r2, sp);
   __ b(ne, &pop_loop);
 
@@ -1029,24 +1105,29 @@
   __ pop(r0);  // Restore deoptimizer object (class Deoptimizer).
 
   // Replace the current (input) frame with the output frames.
-  Label outer_push_loop, inner_push_loop;
+  Label outer_push_loop, inner_push_loop,
+      outer_loop_header, inner_loop_header;
   // Outer loop state: r0 = current "FrameDescription** output_",
   // r1 = one past the last FrameDescription**.
   __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
   __ ldr(r0, MemOperand(r0, Deoptimizer::output_offset()));  // r0 is output_.
   __ add(r1, r0, Operand(r1, LSL, 2));
+  __ jmp(&outer_loop_header);
   __ bind(&outer_push_loop);
   // Inner loop state: r2 = current FrameDescription*, r3 = loop index.
   __ ldr(r2, MemOperand(r0, 0));  // output_[ix]
   __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
+  __ jmp(&inner_loop_header);
   __ bind(&inner_push_loop);
   __ sub(r3, r3, Operand(sizeof(uint32_t)));
   __ add(r6, r2, Operand(r3));
   __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
   __ push(r7);
+  __ bind(&inner_loop_header);
   __ cmp(r3, Operand(0));
   __ b(ne, &inner_push_loop);  // test for gt?
   __ add(r0, r0, Operand(kPointerSize));
+  __ bind(&outer_loop_header);
   __ cmp(r0, r1);
   __ b(lt, &outer_push_loop);
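
The pop_loop/push_loop edits above are plain loop rotation: each loop now
jumps to a header that tests its condition before the first iteration, so the
body is skipped entirely when the count is zero. In C terms, roughly (a
sketch, not V8 code):

    /* before */  do { copy_slot(); } while (r2 != sp);
    /* after  */  while (r2 != sp) { copy_slot(); }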
 
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 3b560fe..4182f8c 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -2031,7 +2031,7 @@
 
 
 void FullCodeGenerator::EmitAssignment(Expression* expr) {
-  // Invalid left-hand sides are rewritten to have a 'throw
+  // Invalid left-hand sides are rewritten by the parser to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
     VisitForEffect(expr);
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 4203673..38092a7 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -42,10 +42,10 @@
 #undef DEFINE_COMPILE
 
 LOsrEntry::LOsrEntry() {
-  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
     register_spills_[i] = NULL;
   }
-  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
     double_register_spills_[i] = NULL;
   }
 }
@@ -612,6 +612,7 @@
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
+  info()->MarkAsNonDeferredCalling();
 #ifdef DEBUG
   instr->VerifyCall();
 #endif
@@ -1240,31 +1241,43 @@
 
 
 HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
-  // Only optimize when we have magic numbers for the divisor.
-  // The standard integer division routine is usually slower than transitionning
-  // to VFP.
-  if (divisor->IsConstant() &&
-      HConstant::cast(divisor)->HasInteger32Value()) {
+  if (CpuFeatures::IsSupported(SUDIV)) {
+    // A value with an integer representation does not need to be transformed.
+    if (divisor->representation().IsInteger32()) {
+      return divisor;
+    // A change from an integer32 can be replaced by the integer32 value.
+    } else if (divisor->IsChange() &&
+               HChange::cast(divisor)->from().IsInteger32()) {
+      return HChange::cast(divisor)->value();
+    }
+  }
+
+  if (divisor->IsConstant() && HConstant::cast(divisor)->HasInteger32Value()) {
     HConstant* constant_val = HConstant::cast(divisor);
     int32_t int32_val = constant_val->Integer32Value();
-    if (LChunkBuilder::HasMagicNumberForDivisor(int32_val)) {
+    if (LChunkBuilder::HasMagicNumberForDivisor(int32_val) ||
+        CpuFeatures::IsSupported(SUDIV)) {
       return constant_val->CopyToRepresentation(Representation::Integer32(),
                                                 divisor->block()->zone());
     }
   }
+
   return NULL;
 }
 
 
 LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
-    HValue* right = instr->right();
-    LOperand* dividend = UseRegister(instr->left());
-    LOperand* divisor = UseRegisterOrConstant(right);
-    LOperand* remainder = TempRegister();
-    ASSERT(right->IsConstant() &&
-           HConstant::cast(right)->HasInteger32Value() &&
-           HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value()));
-    return AssignEnvironment(DefineAsRegister(
+  HValue* right = instr->right();
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = CpuFeatures::IsSupported(SUDIV)
+      ? UseRegister(right)
+      : UseOrConstant(right);
+  LOperand* remainder = TempRegister();
+  ASSERT(CpuFeatures::IsSupported(SUDIV) ||
+         (right->IsConstant() &&
+          HConstant::cast(right)->HasInteger32Value() &&
+          HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value())));
+  return AssignEnvironment(DefineAsRegister(
           new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
 }
 
@@ -1694,6 +1707,7 @@
   Representation to = instr->to();
   if (from.IsTagged()) {
     if (to.IsDouble()) {
+      info()->MarkAsDeferredCalling();
       LOperand* value = UseRegister(instr->value());
       LNumberUntagD* res = new(zone()) LNumberUntagD(value);
       return AssignEnvironment(DefineAsRegister(res));
@@ -1718,6 +1732,7 @@
     }
   } else if (from.IsDouble()) {
     if (to.IsTagged()) {
+      info()->MarkAsDeferredCalling();
       LOperand* value = UseRegister(instr->value());
       LOperand* temp1 = TempRegister();
       LOperand* temp2 = TempRegister();
@@ -1737,6 +1752,7 @@
       return AssignEnvironment(DefineAsRegister(res));
     }
   } else if (from.IsInteger32()) {
+    info()->MarkAsDeferredCalling();
     if (to.IsTagged()) {
       HValue* val = instr->value();
       LOperand* value = UseRegisterAtStart(val);
@@ -1974,7 +1990,16 @@
         (instr->representation().IsDouble() &&
          ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
           (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
-    LOperand* external_pointer = UseRegister(instr->elements());
+    // A float->double conversion on non-VFP2 requires an extra scratch
+    // register. For convenience, just mark the elements register as "UseTemp"
+    // so that it can be used as a temp during the float->double conversion,
+    // once the float load itself no longer needs it.
+    bool needs_temp =
+        !CpuFeatures::IsSupported(VFP2) &&
+        (elements_kind == EXTERNAL_FLOAT_ELEMENTS);
+    LOperand* external_pointer = needs_temp
+        ? UseTempRegister(instr->elements())
+        : UseRegister(instr->elements());
     result = new(zone()) LLoadKeyed(external_pointer, key);
   }
 
@@ -2192,8 +2217,17 @@
 
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
-  int spill_index = chunk()->GetParameterStackSlot(instr->index());
-  return DefineAsSpilled(new(zone()) LParameter, spill_index);
+  LParameter* result = new(zone()) LParameter;
+  if (info()->IsOptimizing()) {
+    int spill_index = chunk()->GetParameterStackSlot(instr->index());
+    return DefineAsSpilled(result, spill_index);
+  } else {
+    ASSERT(info()->IsStub());
+    CodeStubInterfaceDescriptor* descriptor =
+        info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+    Register reg = descriptor->register_params_[instr->index()];
+    return DefineFixed(result, reg);
+  }
 }
 
 
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 7397b4b..1b589ce 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -256,6 +256,11 @@
   void MarkAsCall() { is_call_ = true; }
 
   // Interface to the register allocator and iterators.
+  bool ClobbersTemps() const { return is_call_; }
+  bool ClobbersRegisters() const { return is_call_; }
+  bool ClobbersDoubleRegisters() const { return is_call_; }
+
+  // Interface to the register allocator and iterators.
   bool IsMarkedAsCall() const { return is_call_; }
 
   virtual bool HasResult() const = 0;
@@ -2359,8 +2364,9 @@
   // slot, i.e., that must also be restored to the spill slot on OSR entry.
   // NULL if the register has no assigned spill slot.  Indexed by allocation
   // index.
-  LOperand* register_spills_[Register::kNumAllocatableRegisters];
-  LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+  LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
+  LOperand* double_register_spills_[
+      DoubleRegister::kMaxNumAllocatableRegisters];
 };
 
 
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 06b0216..0ac64fd 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -65,8 +65,6 @@
   HPhase phase("Z_Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;
-  CpuFeatures::Scope scope1(VFP3);
-  CpuFeatures::Scope scope2(ARMv7);
 
   CodeStub::GenerateFPStubs();
 
@@ -118,37 +116,38 @@
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
 
-  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+  if (info()->IsOptimizing()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
 #ifdef DEBUG
-  if (strlen(FLAG_stop_at) > 0 &&
-      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
-    __ stop("stop_at");
-  }
+    if (strlen(FLAG_stop_at) > 0 &&
+        info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+      __ stop("stop_at");
+    }
 #endif
 
-  // r1: Callee's JS function.
-  // cp: Callee's context.
-  // fp: Caller's frame pointer.
-  // lr: Caller's pc.
+    // r1: Callee's JS function.
+    // cp: Callee's context.
+    // fp: Caller's frame pointer.
+    // lr: Caller's pc.
 
-  // Strict mode functions and builtins need to replace the receiver
-  // with undefined when called as functions (without an explicit
-  // receiver object). r5 is zero for method calls and non-zero for
-  // function calls.
-  if (!info_->is_classic_mode() || info_->is_native()) {
-    Label ok;
-    __ cmp(r5, Operand(0));
-    __ b(eq, &ok);
-    int receiver_offset = scope()->num_parameters() * kPointerSize;
-    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-    __ str(r2, MemOperand(sp, receiver_offset));
-    __ bind(&ok);
+    // Strict mode functions and builtins need to replace the receiver
+    // with undefined when called as functions (without an explicit
+    // receiver object). r5 is zero for method calls and non-zero for
+    // function calls.
+    if (!info_->is_classic_mode() || info_->is_native()) {
+      Label ok;
+      __ cmp(r5, Operand(0));
+      __ b(eq, &ok);
+      int receiver_offset = scope()->num_parameters() * kPointerSize;
+      __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+      __ str(r2, MemOperand(sp, receiver_offset));
+      __ bind(&ok);
+    }
   }
 
-
   info()->set_prologue_offset(masm_->pc_offset());
-  {
+  if (NeedsEagerFrame()) {
     PredictableCodeSizeScope predictible_code_size_scope(
         masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
     // The following three instructions must remain together and unmodified
@@ -159,6 +158,7 @@
     __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
     // Adjust FP to point to saved FP.
     __ add(fp, sp, Operand(2 * kPointerSize));
+    frame_is_built_ = true;
   }
 
   // Reserve space for the stack slots needed by the code.
@@ -178,7 +178,7 @@
   }
 
   // Possibly allocate a local context.
-  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment(";;; Allocate local context");
     // Argument to NewContext is the function, which is in r1.
@@ -214,7 +214,7 @@
   }
 
   // Trace the call.
-  if (FLAG_trace) {
+  if (FLAG_trace && info()->IsOptimizing()) {
     __ CallRuntime(Runtime::kTraceEnter, 0);
   }
   return !is_aborted();
@@ -272,10 +272,31 @@
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Deferred build frame",
+                code->instruction_index(),
+                code->instr()->Mnemonic());
+        ASSERT(!frame_is_built_);
+        ASSERT(info()->IsStub());
+        frame_is_built_ = true;
+        __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+        __ push(scratch0());
+        __ add(fp, sp, Operand(2 * kPointerSize));
+      }
       Comment(";;; Deferred code @%d: %s.",
               code->instruction_index(),
               code->instr()->Mnemonic());
       code->Generate();
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Deferred destroy frame",
+                code->instruction_index(),
+                code->instr()->Mnemonic());
+        ASSERT(frame_is_built_);
+        __ pop(ip);
+        __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit());
+        frame_is_built_ = false;
+      }
       __ jmp(code->exit());
     }
   }
@@ -297,24 +318,68 @@
   // Each entry in the jump table generates one instruction and inlines one
   // 32bit data after it.
   if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
-      deopt_jump_table_.length() * 2)) {
+      deopt_jump_table_.length() * 7)) {
     Abort("Generated code is too large");
   }
 
-  // Block the constant pool emission during the jump table emission.
-  __ BlockConstPoolFor(deopt_jump_table_.length());
   __ RecordComment("[ Deoptimisation jump table");
   Label table_start;
   __ bind(&table_start);
+  Label needs_frame_not_call;
+  Label needs_frame_is_call;
   for (int i = 0; i < deopt_jump_table_.length(); i++) {
     __ bind(&deopt_jump_table_[i].label);
-    __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
-    __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
+    Address entry = deopt_jump_table_[i].address;
+    if (deopt_jump_table_[i].needs_frame) {
+      __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
+      if (deopt_jump_table_[i].is_lazy_deopt) {
+        if (needs_frame_is_call.is_bound()) {
+          __ b(&needs_frame_is_call);
+        } else {
+          __ bind(&needs_frame_is_call);
+          __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+          // This variant of deopt can only be used with stubs. Since we don't
+          // have a function pointer to install in the stack frame that we're
+          // building, install a special marker there instead.
+          ASSERT(info()->IsStub());
+          __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+          __ push(scratch0());
+          __ add(fp, sp, Operand(2 * kPointerSize));
+          __ mov(lr, Operand(pc), LeaveCC, al);
+          __ mov(pc, ip);
+        }
+      } else {
+        if (needs_frame_not_call.is_bound()) {
+          __ b(&needs_frame_not_call);
+        } else {
+          __ bind(&needs_frame_not_call);
+          __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+          // This variant of deopt can only be used with stubs. Since we don't
+          // have a function pointer to install in the stack frame that we're
+          // building, install a special marker there instead.
+          ASSERT(info()->IsStub());
+          __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+          __ push(scratch0());
+          __ add(fp, sp, Operand(2 * kPointerSize));
+          __ mov(pc, ip);
+        }
+      }
+    } else {
+      if (deopt_jump_table_[i].is_lazy_deopt) {
+        __ mov(lr, Operand(pc), LeaveCC, al);
+        __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
+      } else {
+        __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
+      }
+    }
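+    // Non-forcing check: emit a pending constant pool here only if it is
+    // about to go out of range.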
+    masm()->CheckConstPool(false, false);
   }
-  ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
-      deopt_jump_table_.length() * 2);
   __ RecordComment("]");
 
+  // Force constant pool emission at the end of the deopt jump table to make
+  // sure that no constant pools are emitted after.
+  masm()->CheckConstPool(true, false);
+
   // The deoptimization jump table is the last part of the instruction
   // sequence. Mark the generated code as done unless we bailed out.
   if (!is_aborted()) status_ = DONE;
@@ -334,8 +399,8 @@
 }
 
 
-DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
-  return DoubleRegister::FromAllocationIndex(index);
+DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
+  return DwVfpRegister::FromAllocationIndex(index);
 }
 
 
@@ -376,15 +441,15 @@
 }
 
 
-DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
   ASSERT(op->IsDoubleRegister());
   return ToDoubleRegister(op->index());
 }
 
 
-DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
-                                                SwVfpRegister flt_scratch,
-                                                DoubleRegister dbl_scratch) {
+DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
+                                               SwVfpRegister flt_scratch,
+                                               DwVfpRegister dbl_scratch) {
   if (op->IsDoubleRegister()) {
     return ToDoubleRegister(op->index());
   } else if (op->IsConstantOperand()) {
@@ -520,7 +585,9 @@
                    translation,
                    arguments_index,
                    arguments_count);
-  int closure_id = *info()->closure() != *environment->closure()
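+  // When compiling a stub there is no closure, so info()->closure() can be
+  // null here.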
+  bool has_closure_id = !info()->closure().is_null() &&
+      *info()->closure() != *environment->closure();
+  int closure_id = has_closure_id
       ? DefineDeoptimizationLiteral(environment->closure())
       : Translation::kSelfLiteralId;
 
@@ -541,6 +608,9 @@
       ASSERT(height == 0);
       translation->BeginSetterStubFrame(closure_id);
       break;
+    case STUB:
+      translation->BeginCompiledStubFrame();
+      break;
     case ARGUMENTS_ADAPTOR:
       translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
       break;
@@ -736,7 +806,11 @@
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
-  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+
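+  // Compiled stubs deoptimize through the lazy deopt entries; optimized
+  // functions use the eager ones here.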
+  Deoptimizer::BailoutType bailout_type = info()->IsStub()
+      ? Deoptimizer::LAZY
+      : Deoptimizer::EAGER;
+  Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
   if (entry == NULL) {
     Abort("bailout was not prepared");
     return;
@@ -752,14 +826,19 @@
 
   if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
 
-  if (cc == al) {
+  bool needs_lazy_deopt = info()->IsStub();
+  ASSERT(info()->IsStub() || frame_is_built_);
+  if (cc == al && !needs_lazy_deopt) {
     __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
     if (deopt_jump_table_.is_empty() ||
-        (deopt_jump_table_.last().address != entry)) {
-      deopt_jump_table_.Add(JumpTableEntry(entry), zone());
+        (deopt_jump_table_.last().address != entry) ||
+        (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) ||
+        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
+      JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
+      deopt_jump_table_.Add(table_entry, zone());
     }
     __ b(cc, &deopt_jump_table_.last().label);
   }
@@ -1026,6 +1105,16 @@
       DeoptimizeIf(eq, instr->environment());
     }
 
+    // Check for (kMinInt % -1).
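+    // The intermediate quotient kMinInt / -1 overflows a 32-bit integer, so
+    // deoptimize rather than use the wrapped sdiv result.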
+    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+      Label left_not_min_int;
+      __ cmp(left, Operand(kMinInt));
+      __ b(ne, &left_not_min_int);
+      __ cmp(right, Operand(-1));
+      DeoptimizeIf(eq, instr->environment());
+      __ bind(&left_not_min_int);
+    }
+
     // For  r3 = r1 % r2; we can have the following ARM code
     // sdiv r3, r1, r2
     // mls r3, r3, r2, r1
@@ -1055,6 +1144,8 @@
 
     Label vfp_modulo, both_positive, right_negative;
 
+    CpuFeatures::Scope scope(VFP2);
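+    // The scope asserts (in debug builds) that VFP2 is available before any
+    // VFP instructions are emitted.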
+
     // Check for x % 0.
     if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
       __ cmp(right, Operand(0));
@@ -1277,7 +1368,7 @@
     __ bind(&left_not_zero);
   }
 
-  // Check for (-kMinInt / -1).
+  // Check for (kMinInt / -1).
   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
     Label left_not_min_int;
     __ cmp(left, Operand(kMinInt));
@@ -1342,25 +1433,68 @@
   const Register remainder = ToRegister(instr->temp());
   const Register scratch = scratch0();
 
-  // We only optimize this for division by constants, because the standard
-  // integer division routine is usually slower than transitionning to VFP.
-  // This could be optimized on processors with SDIV available.
-  ASSERT(instr->right()->IsConstantOperand());
-  int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
-  if (divisor < 0) {
-    __ cmp(left, Operand(0));
+  if (!CpuFeatures::IsSupported(SUDIV)) {
+    // If the CPU doesn't support the sdiv instruction, we only optimize when
+    // we have magic numbers for the divisor. The standard integer division
+    // routine is usually slower than transitioning to VFP.
+    ASSERT(instr->right()->IsConstantOperand());
+    int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
+    ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
+    if (divisor < 0) {
+      __ cmp(left, Operand(0));
+      DeoptimizeIf(eq, instr->environment());
+    }
+    EmitSignedIntegerDivisionByConstant(result,
+                                        left,
+                                        divisor,
+                                        remainder,
+                                        scratch,
+                                        instr->environment());
+    // We performed a truncating division. Correct the result if necessary.
+    __ cmp(remainder, Operand(0));
+    __ teq(remainder, Operand(divisor), ne);
+    __ sub(result, result, Operand(1), LeaveCC, mi);
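+    // The mi condition holds when the remainder and the divisor have opposite
+    // signs, i.e. the truncated quotient must be decremented to get the floor.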
+  } else {
+    CpuFeatures::Scope scope(SUDIV);
+    const Register right = ToRegister(instr->right());
+
+    // Check for x / 0.
+    __ cmp(right, Operand(0));
     DeoptimizeIf(eq, instr->environment());
+
+    // Check for (kMinInt / -1).
+    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+      Label left_not_min_int;
+      __ cmp(left, Operand(kMinInt));
+      __ b(ne, &left_not_min_int);
+      __ cmp(right, Operand(-1));
+      DeoptimizeIf(eq, instr->environment());
+      __ bind(&left_not_min_int);
+    }
+
+    // Check for (0 / -x) that will produce negative zero.
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ cmp(right, Operand(0));
+      __ cmp(left, Operand(0), mi);
+      // "right" can't be null because the code would have already been
+      // deoptimized. The Z flag is set only if (right < 0) and (left == 0).
+      // In this case we need to deoptimize to produce a -0.
+      DeoptimizeIf(eq, instr->environment());
+    }
+
+    Label done;
+    __ sdiv(result, left, right);
+    // If both operands have the same sign then we are done.
+    __ eor(remainder, left, Operand(right), SetCC);
+    __ b(pl, &done);
+
+    // Check if the result needs to be corrected.
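+    // mls computes remainder = left - result * right.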
+    __ mls(remainder, result, right, left);
+    __ cmp(remainder, Operand(0));
+    __ sub(result, result, Operand(1), LeaveCC, ne);
+
+    __ bind(&done);
   }
-  EmitSignedIntegerDivisionByConstant(result,
-                                      left,
-                                      divisor,
-                                      remainder,
-                                      scratch,
-                                      instr->environment());
-  // We operated a truncating division. Correct the result if necessary.
-  __ cmp(remainder, Operand(0));
-  __ teq(remainder, Operand(divisor), ne);
-  __ sub(result, result, Operand(1), LeaveCC, mi);
 }
 
 
@@ -1368,6 +1502,7 @@
                                       LOperand* left_argument,
                                       LOperand* right_argument,
                                       Token::Value op) {
+  CpuFeatures::Scope vfp_scope(VFP2);
   Register left = ToRegister(left_argument);
   Register right = ToRegister(right_argument);
 
@@ -1653,6 +1788,7 @@
 void LCodeGen::DoConstantD(LConstantD* instr) {
   ASSERT(instr->result()->IsDoubleRegister());
   DwVfpRegister result = ToDoubleRegister(instr->result());
+  CpuFeatures::Scope scope(VFP2);
   double v = instr->value();
   __ Vmov(result, v, scratch0());
 }
@@ -1830,9 +1966,10 @@
     __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
   } else {
     ASSERT(instr->hydrogen()->representation().IsDouble());
-    DoubleRegister left_reg = ToDoubleRegister(left);
-    DoubleRegister right_reg = ToDoubleRegister(right);
-    DoubleRegister result_reg = ToDoubleRegister(instr->result());
+    CpuFeatures::Scope scope(VFP2);
+    DwVfpRegister left_reg = ToDoubleRegister(left);
+    DwVfpRegister right_reg = ToDoubleRegister(right);
+    DwVfpRegister result_reg = ToDoubleRegister(instr->result());
     Label check_nan_left, check_zero, return_left, return_right, done;
     __ VFPCompareAndSetFlags(left_reg, right_reg);
     __ b(vs, &check_nan_left);
@@ -1875,9 +2012,10 @@
 
 
 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
-  DoubleRegister left = ToDoubleRegister(instr->left());
-  DoubleRegister right = ToDoubleRegister(instr->right());
-  DoubleRegister result = ToDoubleRegister(instr->result());
+  CpuFeatures::Scope scope(VFP2);
+  DwVfpRegister left = ToDoubleRegister(instr->left());
+  DwVfpRegister right = ToDoubleRegister(instr->right());
+  DwVfpRegister result = ToDoubleRegister(instr->result());
   switch (instr->op()) {
     case Token::ADD:
       __ vadd(result, left, right);
@@ -1965,7 +2103,8 @@
     __ cmp(reg, Operand(0));
     EmitBranch(true_block, false_block, ne);
   } else if (r.IsDouble()) {
-    DoubleRegister reg = ToDoubleRegister(instr->value());
+    CpuFeatures::Scope scope(VFP2);
+    DwVfpRegister reg = ToDoubleRegister(instr->value());
     Register scratch = scratch0();
 
     // Test the double value. Zero and NaN are false.
@@ -2050,8 +2189,9 @@
       }
 
       if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        CpuFeatures::Scope scope(VFP2);
         // heap number -> false iff +0, -0, or NaN.
-        DoubleRegister dbl_scratch = double_scratch0();
+        DwVfpRegister dbl_scratch = double_scratch0();
         Label not_heap_number;
         __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
         __ b(ne, &not_heap_number);
@@ -2129,6 +2269,7 @@
     EmitGoto(next_block);
   } else {
     if (instr->is_double()) {
+      CpuFeatures::Scope scope(VFP2);
       // Compare left and right operands as doubles and load the
       // resulting flags into the normal status register.
       __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
@@ -2667,16 +2808,21 @@
 
 
 void LCodeGen::DoReturn(LReturn* instr) {
-  if (FLAG_trace) {
+  if (FLAG_trace && info()->IsOptimizing()) {
     // Push the return value on the stack as the parameter.
     // Runtime::TraceExit returns its parameter in r0.
     __ push(r0);
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
-  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
-  __ mov(sp, fp);
-  __ ldm(ia_w, sp, fp.bit() | lr.bit());
-  __ add(sp, sp, Operand(sp_delta));
+  if (NeedsEagerFrame()) {
+    int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
+    __ mov(sp, fp);
+    __ ldm(ia_w, sp, fp.bit() | lr.bit());
+    __ add(sp, sp, Operand(sp_delta));
+  }
+  if (info()->IsStub()) {
+    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  }
   __ Jump(lr);
 }
 
@@ -3026,17 +3172,63 @@
 
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-    CpuFeatures::Scope scope(VFP3);
     DwVfpRegister result = ToDoubleRegister(instr->result());
     Operand operand = key_is_constant
         ? Operand(constant_key << element_size_shift)
         : Operand(key, LSL, shift_size);
     __ add(scratch0(), external_pointer, operand);
-    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-      __ vldr(result.low(), scratch0(), additional_offset);
-      __ vcvt_f64_f32(result, result.low());
-    } else  {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-      __ vldr(result, scratch0(), additional_offset);
+    if (CpuFeatures::IsSupported(VFP2)) {
+      CpuFeatures::Scope scope(VFP2);
+      if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+        __ vldr(result.low(), scratch0(), additional_offset);
+        __ vcvt_f64_f32(result, result.low());
+      } else  {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+        __ vldr(result, scratch0(), additional_offset);
+      }
+    } else {
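+      // No VFP2: assemble the IEEE 754 double by hand into sfpd_hi:sfpd_lo,
+      // the register pair reserved for soft-float double values.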
+      if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+        Register value = external_pointer;
+        __ ldr(value, MemOperand(scratch0(), additional_offset));
+        __ and_(sfpd_lo, value, Operand(kBinary32MantissaMask));
+
+        __ mov(scratch0(), Operand(value, LSR, kBinary32MantissaBits));
+        __ and_(scratch0(), scratch0(),
+                Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
+
+        Label exponent_rebiased;
+        __ teq(scratch0(), Operand(0x00));
+        __ b(eq, &exponent_rebiased);
+
+        __ teq(scratch0(), Operand(0xff));
+        __ mov(scratch0(), Operand(0x7ff), LeaveCC, eq);
+        __ b(eq, &exponent_rebiased);
+
+        // Rebias exponent.
+        __ add(scratch0(),
+               scratch0(),
+               Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
+
+        __ bind(&exponent_rebiased);
+        __ and_(sfpd_hi, value, Operand(kBinary32SignMask));
+        __ orr(sfpd_hi, sfpd_hi,
+               Operand(scratch0(), LSL, HeapNumber::kMantissaBitsInTopWord));
+
+        // Shift mantissa.
+        static const int kMantissaShiftForHiWord =
+            kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+        static const int kMantissaShiftForLoWord =
+            kBitsPerInt - kMantissaShiftForHiWord;
+
+        __ orr(sfpd_hi, sfpd_hi,
+               Operand(sfpd_lo, LSR, kMantissaShiftForHiWord));
+        __ mov(sfpd_lo, Operand(sfpd_lo, LSL, kMantissaShiftForLoWord));
+
+      } else {
+        __ ldr(sfpd_lo, MemOperand(scratch0(), additional_offset));
+        __ ldr(sfpd_hi, MemOperand(scratch0(),
+                                   additional_offset + kPointerSize));
+      }
     }
   } else {
     Register result = ToRegister(instr->result());
@@ -3105,23 +3297,28 @@
     key = ToRegister(instr->key());
   }
 
-  Operand operand = key_is_constant
-      ? Operand(((constant_key + instr->additional_index()) <<
-                 element_size_shift) +
-                FixedDoubleArray::kHeaderSize - kHeapObjectTag)
-      : Operand(key, LSL, shift_size);
-  __ add(elements, elements, operand);
+  int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
+      ((constant_key + instr->additional_index()) << element_size_shift);
   if (!key_is_constant) {
-    __ add(elements, elements,
-           Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
-                   (instr->additional_index() << element_size_shift)));
+    __ add(elements, elements, Operand(key, LSL, shift_size));
   }
-
-  __ vldr(result, elements, 0);
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
-    __ cmp(scratch, Operand(kHoleNanUpper32));
-    DeoptimizeIf(eq, instr->environment());
+  if (CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
+    __ add(elements, elements, Operand(base_offset));
+    __ vldr(result, elements, 0);
+    if (instr->hydrogen()->RequiresHoleCheck()) {
+      __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+      __ cmp(scratch, Operand(kHoleNanUpper32));
+      DeoptimizeIf(eq, instr->environment());
+    }
+  } else {
+    __ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
+    __ ldr(sfpd_lo, MemOperand(elements, base_offset));
+    if (instr->hydrogen()->RequiresHoleCheck()) {
+      ASSERT(kPointerSize == sizeof(kHoleNanLower32));
+      __ cmp(sfpd_hi, Operand(kHoleNanUpper32));
+      DeoptimizeIf(eq, instr->environment());
+    }
   }
 }
 
@@ -3557,6 +3754,7 @@
 
 
 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+  CpuFeatures::Scope scope(VFP2);
   // Class for deferred case.
   class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
    public:
@@ -3593,7 +3791,8 @@
 
 
 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->value());
+  CpuFeatures::Scope scope(VFP2);
+  DwVfpRegister input = ToDoubleRegister(instr->value());
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();
 
@@ -3618,7 +3817,8 @@
 
 
 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->value());
+  CpuFeatures::Scope scope(VFP2);
+  DwVfpRegister input = ToDoubleRegister(instr->value());
   Register result = ToRegister(instr->result());
   DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
   Register scratch = scratch0();
@@ -3683,16 +3883,18 @@
 
 
 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->value());
-  DoubleRegister result = ToDoubleRegister(instr->result());
+  CpuFeatures::Scope scope(VFP2);
+  DwVfpRegister input = ToDoubleRegister(instr->value());
+  DwVfpRegister result = ToDoubleRegister(instr->result());
   __ vsqrt(result, input);
 }
 
 
 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->value());
-  DoubleRegister result = ToDoubleRegister(instr->result());
-  DoubleRegister temp = ToDoubleRegister(instr->temp());
+  CpuFeatures::Scope scope(VFP2);
+  DwVfpRegister input = ToDoubleRegister(instr->value());
+  DwVfpRegister result = ToDoubleRegister(instr->result());
+  DwVfpRegister temp = ToDoubleRegister(instr->temp());
 
   // Note that according to ECMA-262 15.8.2.13:
   // Math.pow(-Infinity, 0.5) == Infinity
@@ -3711,6 +3913,7 @@
 
 
 void LCodeGen::DoPower(LPower* instr) {
+  CpuFeatures::Scope scope(VFP2);
   Representation exponent_type = instr->hydrogen()->right()->representation();
   // Having marked this as a call, we can use any registers.
   // Just make sure that the input/output registers are the expected ones.
@@ -3743,6 +3946,7 @@
 
 
 void LCodeGen::DoRandom(LRandom* instr) {
+  CpuFeatures::Scope scope(VFP2);
   class DeferredDoRandom: public LDeferredCode {
    public:
     DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
@@ -3821,10 +4025,11 @@
 
 
 void LCodeGen::DoMathExp(LMathExp* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->value());
-  DoubleRegister result = ToDoubleRegister(instr->result());
-  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
-  DoubleRegister double_scratch2 = double_scratch0();
+  CpuFeatures::Scope scope(VFP2);
+  DwVfpRegister input = ToDoubleRegister(instr->value());
+  DwVfpRegister result = ToDoubleRegister(instr->result());
+  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+  DwVfpRegister double_scratch2 = double_scratch0();
   Register temp1 = ToRegister(instr->temp1());
   Register temp2 = ToRegister(instr->temp2());
 
@@ -4110,6 +4315,7 @@
 
 
 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+  CpuFeatures::Scope scope(VFP2);
   Register external_pointer = ToRegister(instr->elements());
   Register key = no_reg;
   ElementsKind elements_kind = instr->elements_kind();
@@ -4180,6 +4386,7 @@
 
 
 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+  CpuFeatures::Scope scope(VFP2);
   DwVfpRegister value = ToDoubleRegister(instr->value());
   Register elements = ToRegister(instr->elements());
   Register key = no_reg;
@@ -4456,6 +4663,7 @@
 
 
 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  CpuFeatures::Scope scope(VFP2);
   LOperand* input = instr->value();
   ASSERT(input->IsRegister() || input->IsStackSlot());
   LOperand* output = instr->result();
@@ -4473,6 +4681,7 @@
 
 
 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+  CpuFeatures::Scope scope(VFP2);
   LOperand* input = instr->value();
   LOperand* output = instr->result();
 
@@ -4534,13 +4743,49 @@
 }
 
 
+// Converts an unsigned integer with the given number of leading zeroes in its
+// binary representation to an IEEE 754 double.
+// The integer to convert is passed in register hiword.
+// The resulting double is returned in registers hiword:loword.
+// This function does not work correctly for 0.
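+// Example: with leading_zeroes == 1 the top set bit is bit 30, so
+// meaningful_bits is 30 and the double's unbiased exponent is 30.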
+static void GenerateUInt2Double(MacroAssembler* masm,
+                                Register hiword,
+                                Register loword,
+                                Register scratch,
+                                int leading_zeroes) {
+  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
+  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
+
+  const int mantissa_shift_for_hi_word =
+      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
+  const int mantissa_shift_for_lo_word =
+      kBitsPerInt - mantissa_shift_for_hi_word;
+  masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
+  if (mantissa_shift_for_hi_word > 0) {
+    masm->mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
+    masm->orr(hiword, scratch,
+              Operand(hiword, LSR, mantissa_shift_for_hi_word));
+  } else {
+    masm->mov(loword, Operand(0, RelocInfo::NONE));
+    masm->orr(hiword, scratch,
+              Operand(hiword, LSL, -mantissa_shift_for_hi_word));
+  }
+
+  // If the least significant bit of the biased exponent was not 1, it has
+  // been corrupted by the most significant bit of the mantissa; fix it here.
+  if (!(biased_exponent & 1)) {
+    masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
+  }
+}
+
+
 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
                                     LOperand* value,
                                     IntegerSignedness signedness) {
   Label slow;
   Register src = ToRegister(value);
   Register dst = ToRegister(instr->result());
-  DoubleRegister dbl_scratch = double_scratch0();
+  DwVfpRegister dbl_scratch = double_scratch0();
   SwVfpRegister flt_scratch = dbl_scratch.low();
 
   // Preserve the value of all registers.
@@ -4555,16 +4800,40 @@
       __ SmiUntag(src, dst);
       __ eor(src, src, Operand(0x80000000));
     }
-    __ vmov(flt_scratch, src);
-    __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+    if (CpuFeatures::IsSupported(VFP2)) {
+      CpuFeatures::Scope scope(VFP2);
+      __ vmov(flt_scratch, src);
+      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+    } else {
+      FloatingPointHelper::Destination dest =
+          FloatingPointHelper::kCoreRegisters;
+      FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, d0,
+                                              sfpd_lo, sfpd_hi,
+                                              scratch0(), s0);
+    }
   } else {
-    __ vmov(flt_scratch, src);
-    __ vcvt_f64_u32(dbl_scratch, flt_scratch);
+    if (CpuFeatures::IsSupported(VFP2)) {
+      CpuFeatures::Scope scope(VFP2);
+      __ vmov(flt_scratch, src);
+      __ vcvt_f64_u32(dbl_scratch, flt_scratch);
+    } else {
+      Label no_leading_zero, done;
+      __ tst(src, Operand(0x80000000));
+      __ b(ne, &no_leading_zero);
+
+      // Integer has one leading zero.
+      GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 1);
+      __ b(&done);
+
+      __ bind(&no_leading_zero);
+      GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 0);
+      __ bind(&done);
+    }
   }
 
   if (FLAG_inline_new) {
-    __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-    __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
+    __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT);
     __ Move(dst, r5);
     __ b(&done);
   }
@@ -4584,7 +4853,13 @@
   // Done. Put the value in dbl_scratch into the value of the allocated heap
   // number.
   __ bind(&done);
-  __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
+  if (CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
+    __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
+  } else {
+    __ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
+    __ str(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
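+    // Little-endian layout: the mantissa word of a double is at the lower
+    // address, the word holding the sign and exponent at the higher one.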
+  }
   __ add(dst, dst, Operand(kHeapObjectTag));
   __ StoreToSafepointRegisterSlot(dst, dst);
 }
@@ -4601,7 +4876,7 @@
     LNumberTagD* instr_;
   };
 
-  DoubleRegister input_reg = ToDoubleRegister(instr->value());
+  DwVfpRegister input_reg = ToDoubleRegister(instr->value());
   Register scratch = scratch0();
   Register reg = ToRegister(instr->result());
   Register temp1 = ToRegister(instr->temp());
@@ -4617,7 +4892,13 @@
     __ jmp(deferred->entry());
   }
   __ bind(deferred->exit());
-  __ vstr(input_reg, reg, HeapNumber::kValueOffset);
+  if (CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
+    __ vstr(input_reg, reg, HeapNumber::kValueOffset);
+  } else {
+    __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
+    __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
+  }
   // Now that we have finished with the object's real address tag it
   __ add(reg, reg, Operand(kHeapObjectTag));
 }
@@ -4658,13 +4939,14 @@
 
 
 void LCodeGen::EmitNumberUntagD(Register input_reg,
-                                DoubleRegister result_reg,
+                                DwVfpRegister result_reg,
                                 bool deoptimize_on_undefined,
                                 bool deoptimize_on_minus_zero,
                                 LEnvironment* env) {
   Register scratch = scratch0();
   SwVfpRegister flt_scratch = double_scratch0().low();
   ASSERT(!result_reg.is(double_scratch0()));
+  CpuFeatures::Scope scope(VFP2);
 
   Label load_smi, heap_number, done;
 
@@ -4739,6 +5021,7 @@
   __ cmp(scratch1, Operand(ip));
 
   if (instr->truncating()) {
+    CpuFeatures::Scope scope(VFP2);
     Register scratch3 = ToRegister(instr->temp2());
     ASSERT(!scratch3.is(input_reg) &&
            !scratch3.is(scratch1) &&
@@ -4829,7 +5112,7 @@
   ASSERT(result->IsDoubleRegister());
 
   Register input_reg = ToRegister(input);
-  DoubleRegister result_reg = ToDoubleRegister(result);
+  DwVfpRegister result_reg = ToDoubleRegister(result);
 
   EmitNumberUntagD(input_reg, result_reg,
                    instr->hydrogen()->deoptimize_on_undefined(),
@@ -4945,46 +5228,48 @@
 }
 
 
-void LCodeGen::DoCheckMapCommon(Register reg,
-                                Register scratch,
+void LCodeGen::DoCheckMapCommon(Register map_reg,
                                 Handle<Map> map,
                                 CompareMapMode mode,
                                 LEnvironment* env) {
   Label success;
-  __ CompareMap(reg, scratch, map, &success, mode);
+  __ CompareMap(map_reg, map, &success, mode);
   DeoptimizeIf(ne, env);
   __ bind(&success);
 }
 
 
 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
-  Register scratch = scratch0();
+  Register map_reg = scratch0();
   LOperand* input = instr->value();
   ASSERT(input->IsRegister());
   Register reg = ToRegister(input);
 
   Label success;
   SmallMapList* map_set = instr->hydrogen()->map_set();
+  __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
   for (int i = 0; i < map_set->length() - 1; i++) {
     Handle<Map> map = map_set->at(i);
-    __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP);
+    __ CompareMap(map_reg, map, &success, REQUIRE_EXACT_MAP);
     __ b(eq, &success);
   }
   Handle<Map> map = map_set->last();
-  DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
+  DoCheckMapCommon(map_reg, map, REQUIRE_EXACT_MAP, instr->environment());
   __ bind(&success);
 }
 
 
 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
-  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+  CpuFeatures::Scope vfp_scope(VFP2);
+  DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
   Register result_reg = ToRegister(instr->result());
-  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+  DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
   __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
 }
 
 
 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  CpuFeatures::Scope scope(VFP2);
   Register unclamped_reg = ToRegister(instr->unclamped());
   Register result_reg = ToRegister(instr->result());
   __ ClampUint8(result_reg, unclamped_reg);
@@ -4992,10 +5277,11 @@
 
 
 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  CpuFeatures::Scope scope(VFP2);
   Register scratch = scratch0();
   Register input_reg = ToRegister(instr->unclamped());
   Register result_reg = ToRegister(instr->result());
-  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+  DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
   Label is_smi, done, heap_number;
 
   // Both smi and heap number cases are handled.
@@ -5030,28 +5316,30 @@
 
 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
   ASSERT(instr->temp()->Equals(instr->result()));
-  Register temp1 = ToRegister(instr->temp());
-  Register temp2 = ToRegister(instr->temp2());
+  Register prototype_reg = ToRegister(instr->temp());
+  Register map_reg = ToRegister(instr->temp2());
 
   Handle<JSObject> holder = instr->holder();
   Handle<JSObject> current_prototype = instr->prototype();
 
   // Load prototype object.
-  __ LoadHeapObject(temp1, current_prototype);
+  __ LoadHeapObject(prototype_reg, current_prototype);
 
   // Check prototype maps up to the holder.
   while (!current_prototype.is_identical_to(holder)) {
-    DoCheckMapCommon(temp1, temp2,
+    __ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
+    DoCheckMapCommon(map_reg,
                      Handle<Map>(current_prototype->map()),
                      ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
     current_prototype =
         Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
     // Load next prototype object.
-    __ LoadHeapObject(temp1, current_prototype);
+    __ LoadHeapObject(prototype_reg, current_prototype);
   }
 
   // Check the holder map.
-  DoCheckMapCommon(temp1, temp2,
+  __ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
+  DoCheckMapCommon(map_reg,
                    Handle<Map>(current_prototype->map()),
                    ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
 }
@@ -5572,6 +5860,7 @@
 
 
 void LCodeGen::EnsureSpaceForLazyDeopt() {
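+  // Compiled stubs are not patched for lazy bailout, so they need no padding.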
+  if (info()->IsStub()) return;
   // Ensure that we have enough space after the previous lazy-bailout
   // instruction for patching the code here.
   int current_pc = masm()->pc_offset();
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 921285b..f4d8269 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -61,6 +61,7 @@
         deferred_(8, info->zone()),
         osr_pc_offset_(-1),
         last_lazy_deopt_pc_(0),
+        frame_is_built_(false),
         safepoints_(info->zone()),
         resolver_(this),
         expected_safepoint_kind_(Safepoint::kSimple) {
@@ -76,6 +77,15 @@
   Heap* heap() const { return isolate()->heap(); }
   Zone* zone() const { return zone_; }
 
+  bool NeedsEagerFrame() const {
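+    // Optimized functions always build a frame in the prologue; stubs only
+    // need one eagerly if they have stack slots or make non-deferred calls.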
+    return GetStackSlotCount() > 0 ||
+        info()->is_non_deferred_calling() ||
+        !info()->IsStub();
+  }
+  bool NeedsDeferredFrame() const {
+    return !NeedsEagerFrame() && info()->is_deferred_calling();
+  }
+
   // Support for converting LOperands to assembler types.
   // LOperand must be a register.
   Register ToRegister(LOperand* op) const;
@@ -84,12 +94,12 @@
   Register EmitLoadRegister(LOperand* op, Register scratch);
 
   // LOperand must be a double register.
-  DoubleRegister ToDoubleRegister(LOperand* op) const;
+  DwVfpRegister ToDoubleRegister(LOperand* op) const;
 
   // LOperand is loaded into dbl_scratch, unless already a double register.
-  DoubleRegister EmitLoadDoubleRegister(LOperand* op,
-                                        SwVfpRegister flt_scratch,
-                                        DoubleRegister dbl_scratch);
+  DwVfpRegister EmitLoadDoubleRegister(LOperand* op,
+                                       SwVfpRegister flt_scratch,
+                                       DwVfpRegister dbl_scratch);
   int ToInteger32(LConstantOperand* op) const;
   double ToDouble(LConstantOperand* op) const;
   Operand ToOperand(LOperand* op);
@@ -131,7 +141,7 @@
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
 
-  void DoCheckMapCommon(Register reg, Register scratch, Handle<Map> map,
+  void DoCheckMapCommon(Register map_reg, Handle<Map> map,
                         CompareMapMode mode, LEnvironment* env);
 
   // Parallel move support.
@@ -193,7 +203,7 @@
                        Register temporary2);
 
   int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
-  int GetParameterCount() const { return scope()->num_parameters(); }
+  int GetParameterCount() const { return info()->num_parameters(); }
 
   void Abort(const char* reason);
   void Comment(const char* format, ...);
@@ -275,7 +285,7 @@
   void PopulateDeoptimizationLiteralsWithInlinedFunctions();
 
   Register ToRegister(int index) const;
-  DoubleRegister ToDoubleRegister(int index) const;
+  DwVfpRegister ToDoubleRegister(int index) const;
 
   // Specific math operations - used from DoUnaryMathOperation.
   void EmitIntegerMathAbs(LUnaryMathOperation* instr);
@@ -308,7 +318,7 @@
   void EmitGoto(int block);
   void EmitBranch(int left_block, int right_block, Condition cc);
   void EmitNumberUntagD(Register input,
-                        DoubleRegister result,
+                        DwVfpRegister result,
                         bool deoptimize_on_undefined,
                         bool deoptimize_on_minus_zero,
                         LEnvironment* env);
@@ -369,11 +379,15 @@
                                            LEnvironment* environment);
 
   struct JumpTableEntry {
-    explicit inline JumpTableEntry(Address entry)
+    inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
         : label(),
-          address(entry) { }
+          address(entry),
+          needs_frame(frame),
+          is_lazy_deopt(is_lazy) { }
     Label label;
     Address address;
+    bool needs_frame;
+    bool is_lazy_deopt;
   };
 
   void EnsureSpaceForLazyDeopt();
@@ -402,6 +416,7 @@
   ZoneList<LDeferredCode*> deferred_;
   int osr_pc_offset_;
   int last_lazy_deopt_pc_;
+  bool frame_is_built_;
 
   // Builder that keeps track of safepoints in the code. The table
   // itself is emitted at the end of the generated code.
@@ -417,6 +432,7 @@
     PushSafepointRegistersScope(LCodeGen* codegen,
                                 Safepoint::Kind kind)
         : codegen_(codegen) {
+      ASSERT(codegen_->info()->is_calling());
       ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
       codegen_->expected_safepoint_kind_ = kind;
 
diff --git a/src/arm/lithium-gap-resolver-arm.cc b/src/arm/lithium-gap-resolver-arm.cc
index c100720..4df1338 100644
--- a/src/arm/lithium-gap-resolver-arm.cc
+++ b/src/arm/lithium-gap-resolver-arm.cc
@@ -171,8 +171,10 @@
   } else if (source->IsStackSlot()) {
     __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
   } else if (source->IsDoubleRegister()) {
+    CpuFeatures::Scope scope(VFP2);
     __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
   } else if (source->IsDoubleStackSlot()) {
+    CpuFeatures::Scope scope(VFP2);
     __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
   } else {
     UNREACHABLE();
@@ -192,8 +194,10 @@
   } else if (saved_destination_->IsStackSlot()) {
     __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
   } else if (saved_destination_->IsDoubleRegister()) {
+    CpuFeatures::Scope scope(VFP2);
     __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
   } else if (saved_destination_->IsDoubleStackSlot()) {
+    CpuFeatures::Scope scope(VFP2);
     __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
   } else {
     UNREACHABLE();
@@ -229,7 +233,8 @@
       MemOperand destination_operand = cgen_->ToMemOperand(destination);
       if (in_cycle_) {
         if (!destination_operand.OffsetIsUint12Encodable()) {
-          // ip is overwritten while saving the value to the destination.
+          CpuFeatures::Scope scope(VFP2);
+          // ip is overwritten while saving the value to the destination.
           // Therefore we can't use ip.  It is OK if the read from the source
           // destroys ip, since that happens before the value is read.
           __ vldr(kScratchDoubleReg.low(), source_operand);
@@ -267,7 +272,8 @@
     }
 
   } else if (source->IsDoubleRegister()) {
-    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+    CpuFeatures::Scope scope(VFP2);
+    DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
     if (destination->IsDoubleRegister()) {
       __ vmov(cgen_->ToDoubleRegister(destination), source_register);
     } else {
@@ -276,7 +282,8 @@
     }
 
   } else if (source->IsDoubleStackSlot()) {
-    MemOperand source_operand = cgen_->ToMemOperand(source);
+    CpuFeatures::Scope scope(VFP2);
+    MemOperand source_operand = cgen_->ToMemOperand(source);
     if (destination->IsDoubleRegister()) {
       __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
     } else {
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index dc1dc1d..33b557e 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -290,7 +290,7 @@
 }
 
 
-void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
   ASSERT(CpuFeatures::IsSupported(VFP2));
   CpuFeatures::Scope scope(VFP2);
   if (!dst.is(src)) {
@@ -643,19 +643,19 @@
 
 void MacroAssembler::PushSafepointRegistersAndDoubles() {
   PushSafepointRegisters();
-  sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
+  sub(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
                       kDoubleSize));
-  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
+  for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
     vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
   }
 }
 
 
 void MacroAssembler::PopSafepointRegistersAndDoubles() {
-  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
+  for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
     vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
   }
-  add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
+  add(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
                       kDoubleSize));
   PopSafepointRegisters();
 }
@@ -691,7 +691,7 @@
 
 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
   // General purpose registers are pushed last on the stack.
-  int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
+  int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
   return MemOperand(sp, doubles_size + register_offset);
 }
@@ -967,7 +967,7 @@
   }
 }
 
-void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
   ASSERT(CpuFeatures::IsSupported(VFP2));
   if (use_eabi_hardfloat()) {
     Move(dst, d0);
@@ -2729,7 +2729,10 @@
   const Runtime::Function* function = Runtime::FunctionForId(id);
   mov(r0, Operand(function->nargs));
   mov(r1, Operand(ExternalReference(function, isolate())));
-  CEntryStub stub(1, kSaveFPRegs);
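+  // Only ask CEntryStub to save the double registers when the CPU actually
+  // has them.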
+  SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
+      ? kSaveFPRegs
+      : kDontSaveFPRegs;
+  CEntryStub stub(1, mode);
   CallStub(&stub);
 }
 
@@ -3405,9 +3408,9 @@
   if (use_eabi_hardfloat()) {
     // In the hard floating point calling convention, we can use
     // all double registers to pass doubles.
-    if (num_double_arguments > DoubleRegister::kNumRegisters) {
+    if (num_double_arguments > DoubleRegister::NumRegisters()) {
       stack_passed_words +=
-          2 * (num_double_arguments - DoubleRegister::kNumRegisters);
+          2 * (num_double_arguments - DoubleRegister::NumRegisters());
     }
   } else {
     // In the soft floating point calling convention, every double
@@ -3448,7 +3451,7 @@
 }
 
 
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
+void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
   ASSERT(CpuFeatures::IsSupported(VFP2));
   if (use_eabi_hardfloat()) {
     Move(d0, dreg);
@@ -3458,8 +3461,8 @@
 }
 
 
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
-                                             DoubleRegister dreg2) {
+void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
+                                             DwVfpRegister dreg2) {
   ASSERT(CpuFeatures::IsSupported(VFP2));
   if (use_eabi_hardfloat()) {
     if (dreg2.is(d0)) {
@@ -3477,7 +3480,7 @@
 }
 
 
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
+void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
                                              Register reg) {
   ASSERT(CpuFeatures::IsSupported(VFP2));
   if (use_eabi_hardfloat()) {
@@ -3760,8 +3763,8 @@
 
 
 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
-                                        DoubleRegister input_reg,
-                                        DoubleRegister temp_double_reg) {
+                                        DwVfpRegister input_reg,
+                                        DwVfpRegister temp_double_reg) {
   Label above_zero;
   Label done;
   Label in_bounds;
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 15cef16..3dedd29 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -178,7 +178,7 @@
   // Register move. May do nothing if the registers are identical.
   void Move(Register dst, Handle<Object> value);
   void Move(Register dst, Register src, Condition cond = al);
-  void Move(DoubleRegister dst, DoubleRegister src);
+  void Move(DwVfpRegister dst, DwVfpRegister src);
 
   // Load an object from the root table.
   void LoadRoot(Register destination,
@@ -1066,9 +1066,9 @@
   // whether soft or hard floating point ABI is used. These functions
   // abstract parameter passing for the three different ways we call
   // C functions from generated code.
-  void SetCallCDoubleArguments(DoubleRegister dreg);
-  void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
-  void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
+  void SetCallCDoubleArguments(DwVfpRegister dreg);
+  void SetCallCDoubleArguments(DwVfpRegister dreg1, DwVfpRegister dreg2);
+  void SetCallCDoubleArguments(DwVfpRegister dreg, Register reg);
 
   // Calls a C function and cleans up the space for arguments allocated
   // by PrepareCallCFunction. The called function is not allowed to trigger a
@@ -1084,7 +1084,7 @@
                      int num_reg_arguments,
                      int num_double_arguments);
 
-  void GetCFunctionDoubleResult(const DoubleRegister dst);
+  void GetCFunctionDoubleResult(const DwVfpRegister dst);
 
   // Calls an API function.  Allocates HandleScope, extracts returned value
   // from handle and propagates exceptions.  Restores context.  stack_space
@@ -1297,8 +1297,8 @@
   void ClampUint8(Register output_reg, Register input_reg);
 
   void ClampDoubleToUint8(Register result_reg,
-                          DoubleRegister input_reg,
-                          DoubleRegister temp_double_reg);
+                          DwVfpRegister input_reg,
+                          DwVfpRegister temp_double_reg);
 
 
   void LoadInstanceDescriptors(Register map, Register descriptors);
@@ -1373,9 +1373,9 @@
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
 
-  // Needs access to SafepointRegisterStackIndex for optimized frame
+  // Needs access to SafepointRegisterStackIndex for compiled frame
   // traversal.
-  friend class OptimizedFrame;
+  friend class StandardFrame;
 };
 
 
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index a194dfa..dd5db30 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1053,42 +1053,6 @@
 }
 
 
-// Convert unsigned integer with specified number of leading zeroes in binary
-// representation to IEEE 754 double.
-// Integer to convert is passed in register hiword.
-// Resulting double is returned in registers hiword:loword.
-// This functions does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
-                                Register hiword,
-                                Register loword,
-                                Register scratch,
-                                int leading_zeroes) {
-  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
-  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
-  const int mantissa_shift_for_hi_word =
-      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
-
-  const int mantissa_shift_for_lo_word =
-      kBitsPerInt - mantissa_shift_for_hi_word;
-
-  __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
-  if (mantissa_shift_for_hi_word > 0) {
-    __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
-    __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
-  } else {
-    __ mov(loword, Operand(0, RelocInfo::NONE));
-    __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
-  }
-
-  // If least significant bit of biased exponent was not 1 it was corrupted
-  // by most significant bit of mantissa so we should fix that.
-  if (!(biased_exponent & 1)) {
-    __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
-  }
-}
-
-
 #undef __
 #define __ ACCESS_MASM(masm())
 
@@ -3319,9 +3283,17 @@
   //  -- r1    : receiver
   // -----------------------------------
   ElementsKind elements_kind = receiver_map->elements_kind();
-  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
-  __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
+  if (receiver_map->has_fast_elements() ||
+      receiver_map->has_external_array_elements()) {
+    Handle<Code> stub = KeyedLoadFastElementStub(
+        receiver_map->instance_type() == JS_ARRAY_TYPE,
+        elements_kind).GetCode();
+    __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
+  } else {
+    Handle<Code> stub =
+        KeyedLoadDictionaryElementStub().GetCode();
+    __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
+  }
 
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
@@ -3726,339 +3698,6 @@
 }
 
 
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
-    MacroAssembler* masm,
-    ElementsKind elements_kind) {
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- r0     : key
-  //  -- r1     : receiver
-  // -----------------------------------
-  Label miss_force_generic, slow, failed_allocation;
-
-  Register key = r0;
-  Register receiver = r1;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
-
-  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // r3: elements array
-
-  // Check that the index is in range.
-  __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
-  __ cmp(key, ip);
-  // Unsigned comparison catches both negative and too-large values.
-  __ b(hs, &miss_force_generic);
-
-  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-  // r3: base pointer of external storage
-
-  // We are not untagging smi key and instead work with it
-  // as if it was premultiplied by 2.
-  STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
-
-  Register value = r2;
-  switch (elements_kind) {
-    case EXTERNAL_BYTE_ELEMENTS:
-      __ ldrsb(value, MemOperand(r3, key, LSR, 1));
-      break;
-    case EXTERNAL_PIXEL_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      __ ldrb(value, MemOperand(r3, key, LSR, 1));
-      break;
-    case EXTERNAL_SHORT_ELEMENTS:
-      __ ldrsh(value, MemOperand(r3, key, LSL, 0));
-      break;
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      __ ldrh(value, MemOperand(r3, key, LSL, 0));
-      break;
-    case EXTERNAL_INT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-      __ ldr(value, MemOperand(r3, key, LSL, 1));
-      break;
-    case EXTERNAL_FLOAT_ELEMENTS:
-      if (CpuFeatures::IsSupported(VFP2)) {
-        CpuFeatures::Scope scope(VFP2);
-        __ add(r2, r3, Operand(key, LSL, 1));
-        __ vldr(s0, r2, 0);
-      } else {
-        __ ldr(value, MemOperand(r3, key, LSL, 1));
-      }
-      break;
-    case EXTERNAL_DOUBLE_ELEMENTS:
-      if (CpuFeatures::IsSupported(VFP2)) {
-        CpuFeatures::Scope scope(VFP2);
-        __ add(r2, r3, Operand(key, LSL, 2));
-        __ vldr(d0, r2, 0);
-      } else {
-        __ add(r4, r3, Operand(key, LSL, 2));
-        // r4: pointer to the beginning of the double we want to load.
-        __ ldr(r2, MemOperand(r4, 0));
-        __ ldr(r3, MemOperand(r4, Register::kSizeInBytes));
-      }
-      break;
-    case FAST_ELEMENTS:
-    case FAST_SMI_ELEMENTS:
-    case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
-    case DICTIONARY_ELEMENTS:
-    case NON_STRICT_ARGUMENTS_ELEMENTS:
-      UNREACHABLE();
-      break;
-  }
-
-  // For integer array types:
-  // r2: value
-  // For float array type:
-  // s0: value (if VFP3 is supported)
-  // r2: value (if VFP3 is not supported)
-  // For double array type:
-  // d0: value (if VFP3 is supported)
-  // r2/r3: value (if VFP3 is not supported)
-
-  if (elements_kind == EXTERNAL_INT_ELEMENTS) {
-    // For the Int and UnsignedInt array types, we need to see whether
-    // the value can be represented in a Smi. If not, we need to convert
-    // it to a HeapNumber.
-    Label box_int;
-    __ cmp(value, Operand(0xC0000000));
-    __ b(mi, &box_int);
-    // Tag integer as smi and return it.
-    __ mov(r0, Operand(value, LSL, kSmiTagSize));
-    __ Ret();
-
-    __ bind(&box_int);
-    if (CpuFeatures::IsSupported(VFP2)) {
-      CpuFeatures::Scope scope(VFP2);
-      // Allocate a HeapNumber for the result and perform int-to-double
-      // conversion.  Don't touch r0 or r1 as they are needed if allocation
-      // fails.
-      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-
-      __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
-      // Now we can use r0 for the result as key is not needed any more.
-      __ add(r0, r5, Operand(kHeapObjectTag));
-      __ vmov(s0, value);
-      __ vcvt_f64_s32(d0, s0);
-      __ vstr(d0, r5, HeapNumber::kValueOffset);
-      __ Ret();
-    } else {
-      // Allocate a HeapNumber for the result and perform int-to-double
-      // conversion.  Don't touch r0 or r1 as they are needed if allocation
-      // fails.
-      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT);
-      // Now we can use r0 for the result as key is not needed any more.
-      __ mov(r0, r5);
-      Register dst_mantissa = r1;
-      Register dst_exponent = r3;
-      FloatingPointHelper::Destination dest =
-          FloatingPointHelper::kCoreRegisters;
-      FloatingPointHelper::ConvertIntToDouble(masm,
-                                              value,
-                                              dest,
-                                              d0,
-                                              dst_mantissa,
-                                              dst_exponent,
-                                              r9,
-                                              s0);
-      __ str(dst_mantissa, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
-      __ str(dst_exponent, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-      __ Ret();
-    }
-  } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
-    // The test is different for unsigned int values. Since we need
-    // the value to be in the range of a positive smi, we can't
-    // handle either of the top two bits being set in the value.
-    if (CpuFeatures::IsSupported(VFP2)) {
-      CpuFeatures::Scope scope(VFP2);
-      Label box_int, done;
-      __ tst(value, Operand(0xC0000000));
-      __ b(ne, &box_int);
-      // Tag integer as smi and return it.
-      __ mov(r0, Operand(value, LSL, kSmiTagSize));
-      __ Ret();
-
-      __ bind(&box_int);
-      __ vmov(s0, value);
-      // Allocate a HeapNumber for the result and perform int-to-double
-      // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
-      // registers - also when jumping due to exhausted young space.
-      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
-
-      __ vcvt_f64_u32(d0, s0);
-      __ vstr(d0, r2, HeapNumber::kValueOffset);
-
-      __ add(r0, r2, Operand(kHeapObjectTag));
-      __ Ret();
-    } else {
-      // Check whether unsigned integer fits into smi.
-      Label box_int_0, box_int_1, done;
-      __ tst(value, Operand(0x80000000));
-      __ b(ne, &box_int_0);
-      __ tst(value, Operand(0x40000000));
-      __ b(ne, &box_int_1);
-      // Tag integer as smi and return it.
-      __ mov(r0, Operand(value, LSL, kSmiTagSize));
-      __ Ret();
-
-      Register hiword = value;  // r2.
-      Register loword = r3;
-
-      __ bind(&box_int_0);
-      // Integer does not have leading zeros.
-      GenerateUInt2Double(masm, hiword, loword, r4, 0);
-      __ b(&done);
-
-      __ bind(&box_int_1);
-      // Integer has one leading zero.
-      GenerateUInt2Double(masm, hiword, loword, r4, 1);
-
-
-      __ bind(&done);
-      // Integer was converted to double in registers hiword:loword.
-      // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
-      // clobbers all registers - also when jumping due to exhausted young
-      // space.
-      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT);
-
-      __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
-      __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
-
-      __ mov(r0, r4);
-      __ Ret();
-    }
-  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-    // For the floating-point array type, we need to always allocate a
-    // HeapNumber.
-    if (CpuFeatures::IsSupported(VFP2)) {
-      CpuFeatures::Scope scope(VFP2);
-      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
-      // AllocateHeapNumber clobbers all registers - also when jumping due to
-      // exhausted young space.
-      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
-      __ vcvt_f64_f32(d0, s0);
-      __ vstr(d0, r2, HeapNumber::kValueOffset);
-
-      __ add(r0, r2, Operand(kHeapObjectTag));
-      __ Ret();
-    } else {
-      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
-      // AllocateHeapNumber clobbers all registers - also when jumping due to
-      // exhausted young space.
-      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT);
-      // VFP is not available, do manual single to double conversion.
-
-      // r2: floating point value (binary32)
-      // r3: heap number for result
-
-      // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
-      // the slow case from here.
-      __ and_(r0, value, Operand(kBinary32MantissaMask));
-
-      // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
-      // the slow case from here.
-      __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
-      __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
-      Label exponent_rebiased;
-      __ teq(r1, Operand(0x00));
-      __ b(eq, &exponent_rebiased);
-
-      __ teq(r1, Operand(0xff));
-      __ mov(r1, Operand(0x7ff), LeaveCC, eq);
-      __ b(eq, &exponent_rebiased);
-
-      // Rebias exponent.
-      __ add(r1,
-             r1,
-             Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
-      __ bind(&exponent_rebiased);
-      __ and_(r2, value, Operand(kBinary32SignMask));
-      value = no_reg;
-      __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
-
-      // Shift mantissa.
-      static const int kMantissaShiftForHiWord =
-          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
-      static const int kMantissaShiftForLoWord =
-          kBitsPerInt - kMantissaShiftForHiWord;
-
-      __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
-      __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
-
-      __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
-      __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
-
-      __ mov(r0, r3);
-      __ Ret();
-    }
-  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-    if (CpuFeatures::IsSupported(VFP2)) {
-      CpuFeatures::Scope scope(VFP2);
-      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
-      // AllocateHeapNumber clobbers all registers - also when jumping due to
-      // exhausted young space.
-      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
-      __ vstr(d0, r2, HeapNumber::kValueOffset);
-
-      __ add(r0, r2, Operand(kHeapObjectTag));
-      __ Ret();
-    } else {
-      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
-      // AllocateHeapNumber clobbers all registers - also when jumping due to
-      // exhausted young space.
-      __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT);
-
-      __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
-      __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset));
-      __ mov(r0, r4);
-      __ Ret();
-    }
-
-  } else {
-    // Tag integer as smi and return it.
-    __ mov(r0, Operand(value, LSL, kSmiTagSize));
-    __ Ret();
-  }
-
-  // Slow case, key and receiver still in r0 and r1.
-  __ bind(&slow);
-  __ IncrementCounter(
-      masm->isolate()->counters()->keyed_load_external_array_slow(),
-      1, r2, r3);
-
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- r0     : key
-  //  -- r1     : receiver
-  // -----------------------------------
-
-  __ Push(r1, r0);
-
-  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-
-  __ bind(&miss_force_generic);
-  Handle<Code> stub =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ Jump(stub, RelocInfo::CODE_TARGET);
-}
-
-
 void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     MacroAssembler* masm,
     ElementsKind elements_kind) {
@@ -4403,118 +4042,6 @@
 }
 
 
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- lr    : return address
-  //  -- r0    : key
-  //  -- r1    : receiver
-  // -----------------------------------
-  Label miss_force_generic;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, r0, r4, r5, d1, d2, &miss_force_generic);
-
-  // Get the elements array.
-  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
-  __ AssertFastElements(r2);
-
-  // Check that the key is within bounds.
-  __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
-  __ cmp(r0, Operand(r3));
-  __ b(hs, &miss_force_generic);
-
-  // Load the result and make sure it's not the hole.
-  __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ ldr(r4,
-         MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-  __ cmp(r4, ip);
-  __ b(eq, &miss_force_generic);
-  __ mov(r0, r4);
-  __ Ret();
-
-  __ bind(&miss_force_generic);
-  Handle<Code> stub =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ Jump(stub, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
-    MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- lr    : return address
-  //  -- r0    : key
-  //  -- r1    : receiver
-  // -----------------------------------
-  Label miss_force_generic, slow_allocate_heapnumber;
-
-  Register key_reg = r0;
-  Register receiver_reg = r1;
-  Register elements_reg = r2;
-  Register heap_number_reg = r2;
-  Register indexed_double_offset = r3;
-  Register scratch = r4;
-  Register scratch2 = r5;
-  Register scratch3 = r6;
-  Register heap_number_map = r7;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
-
-  // Get the elements array.
-  __ ldr(elements_reg,
-         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
-  // Check that the key is within bounds.
-  __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
-  __ cmp(key_reg, Operand(scratch));
-  __ b(hs, &miss_force_generic);
-
-  // Load the upper word of the double in the fixed array and test for NaN.
-  __ add(indexed_double_offset, elements_reg,
-         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
-  uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
-  __ cmp(scratch, Operand(kHoleNanUpper32));
-  __ b(&miss_force_generic, eq);
-
-  // Non-NaN. Allocate a new heap number and copy the double value into it.
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-  __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
-                        heap_number_map, &slow_allocate_heapnumber, TAG_RESULT);
-
-  // Don't need to reload the upper 32 bits of the double, it's already in
-  // scratch.
-  __ str(scratch, FieldMemOperand(heap_number_reg,
-                                  HeapNumber::kExponentOffset));
-  __ ldr(scratch, FieldMemOperand(indexed_double_offset,
-                                  FixedArray::kHeaderSize));
-  __ str(scratch, FieldMemOperand(heap_number_reg,
-                                  HeapNumber::kMantissaOffset));
-
-  __ mov(r0, heap_number_reg);
-  __ Ret();
-
-  __ bind(&slow_allocate_heapnumber);
-  Handle<Code> slow_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_Slow();
-  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
-  __ bind(&miss_force_generic);
-  Handle<Code> miss_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
 void KeyedStoreStubCompiler::GenerateStoreFastElement(
     MacroAssembler* masm,
     bool is_js_array,
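
The non-VFP path removed above for EXTERNAL_FLOAT_ELEMENTS widens an IEEE-754 binary32 value to binary64 by hand. A standalone C++ sketch of the same bit manipulation (illustrative, not V8 code):

    #include <stdint.h>

    // Rebias the exponent from 127 to 1023 and shift the 23-bit mantissa
    // to the top of the 52-bit field (29 = 52 - 23). Like the removed
    // assembly, a zero exponent passes through unchanged, so denormal
    // inputs are not renormalized.
    uint64_t Binary32ToBinary64Bits(uint32_t bits) {
      uint64_t sign = static_cast<uint64_t>(bits & 0x80000000u) << 32;
      uint64_t exponent = (bits >> 23) & 0xFF;
      uint64_t mantissa = bits & 0x007FFFFFu;
      if (exponent == 0xFF) {
        exponent = 0x7FF;          // Infinity or NaN keep an all-ones exponent.
      } else if (exponent != 0) {
        exponent += 1023 - 127;    // Rebias a normal exponent.
      }
      return sign | (exponent << 52) | (mantissa << 29);
    }
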
diff --git a/src/assembler.cc b/src/assembler.cc
index 25157be..ccaf290 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -1375,6 +1375,11 @@
 }
 
 
+ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
+  return ExternalReference(entry);
+}
+
+
 // Helper function to compute x^y, where y is known to be an
 // integer. Uses binary decomposition to limit the number of
 // multiplications; see the discussion in "Hacker's Delight" by Henry
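
The comment above belongs to the power_double_int helper (the citation continues: Henry M. Warren, Jr.). The binary-decomposition idea, as a standalone sketch rather than the exact V8 code: one squaring per exponent bit, so x^y costs O(log2 |y|) multiplications instead of |y|.

    double Power(double x, int y) {
      unsigned n = (y < 0) ? -static_cast<unsigned>(y) : static_cast<unsigned>(y);
      double result = 1.0;
      for (double base = x; n != 0; n >>= 1) {
        if (n & 1) result *= base;   // This exponent bit is set.
        base *= base;                // base == x^(2^i) at bit position i.
      }
      return (y < 0) ? 1.0 / result : result;
    }
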
diff --git a/src/assembler.h b/src/assembler.h
index 4639374..111c1d9 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -736,6 +736,8 @@
 
   static ExternalReference page_flags(Page* page);
 
+  static ExternalReference ForDeoptEntry(Address entry);
+
   Address address() const {return reinterpret_cast<Address>(address_);}
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
diff --git a/src/ast.cc b/src/ast.cc
index 232cb73..c43b913 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -616,14 +616,6 @@
 // ----------------------------------------------------------------------------
 // Implementation of AstVisitor
 
-bool AstVisitor::CheckStackOverflow() {
-  if (stack_overflow_) return true;
-  StackLimitCheck check(isolate_);
-  if (!check.HasOverflowed()) return false;
-  return (stack_overflow_ = true);
-}
-
-
 void AstVisitor::VisitDeclarations(ZoneList<Declaration*>* declarations) {
   for (int i = 0; i < declarations->length(); i++) {
     Visit(declarations->at(i));
diff --git a/src/ast.h b/src/ast.h
index d299f19..a0a7a73 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -2492,42 +2492,53 @@
 
 class AstVisitor BASE_EMBEDDED {
  public:
-  AstVisitor() : isolate_(Isolate::Current()), stack_overflow_(false) { }
+  AstVisitor() {}
   virtual ~AstVisitor() { }
 
   // Stack overflow check and dynamic dispatch.
-  void Visit(AstNode* node) { if (!CheckStackOverflow()) node->Accept(this); }
+  virtual void Visit(AstNode* node) = 0;
 
   // Iteration left-to-right.
   virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
   virtual void VisitStatements(ZoneList<Statement*>* statements);
   virtual void VisitExpressions(ZoneList<Expression*>* expressions);
 
-  // Stack overflow tracking support.
-  bool HasStackOverflow() const { return stack_overflow_; }
-  bool CheckStackOverflow();
-
-  // If a stack-overflow exception is encountered when visiting a
-  // node, calling SetStackOverflow will make sure that the visitor
-  // bails out without visiting more nodes.
-  void SetStackOverflow() { stack_overflow_ = true; }
-  void ClearStackOverflow() { stack_overflow_ = false; }
-
   // Individual AST nodes.
 #define DEF_VISIT(type)                         \
   virtual void Visit##type(type* node) = 0;
   AST_NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
-
- protected:
-  Isolate* isolate() { return isolate_; }
-
- private:
-  Isolate* isolate_;
-  bool stack_overflow_;
 };
 
 
+#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS()                       \
+public:                                                             \
+  virtual void Visit(AstNode* node) {                               \
+    if (!CheckStackOverflow()) node->Accept(this);                  \
+  }                                                                 \
+                                                                    \
+  void SetStackOverflow() { stack_overflow_ = true; }               \
+  void ClearStackOverflow() { stack_overflow_ = false; }            \
+  bool HasStackOverflow() const { return stack_overflow_; }         \
+                                                                    \
+  bool CheckStackOverflow() {                                       \
+    if (stack_overflow_) return true;                               \
+    StackLimitCheck check(isolate_);                                \
+    if (!check.HasOverflowed()) return false;                       \
+    return (stack_overflow_ = true);                                \
+  }                                                                 \
+                                                                    \
+private:                                                            \
+  void InitializeAstVisitor() {                                     \
+    isolate_ = Isolate::Current();                                  \
+    stack_overflow_ = false;                                        \
+  }                                                                 \
+  Isolate* isolate() { return isolate_; }                           \
+                                                                    \
+  Isolate* isolate_;                                                \
+  bool stack_overflow_
+
+
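
Each concrete visitor now carries its own isolate and overflow flag via the macro. A hypothetical subclass showing the intended usage (NodeCounter and its visit bodies are illustrative only); the subclass constructor is responsible for calling InitializeAstVisitor():

    class NodeCounter: public AstVisitor {
     public:
      NodeCounter() : count_(0) { InitializeAstVisitor(); }

      // Count every node this visitor is asked to visit.
    #define DECLARE_VISIT(type) \
      virtual void Visit##type(type* node) { count_++; }
      AST_NODE_LIST(DECLARE_VISIT)
    #undef DECLARE_VISIT

      int count() const { return count_; }

     private:
      int count_;

      DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
    };
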
 // ----------------------------------------------------------------------------
 // Construction time visitor.
 
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 8d52950..bf96c8c 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -353,7 +353,7 @@
                                           bool is_ecma_native) {
   Isolate* isolate = target->GetIsolate();
   Factory* factory = isolate->factory();
-  Handle<String> symbol = factory->LookupAsciiSymbol(name);
+  Handle<String> symbol = factory->LookupUtf8Symbol(name);
   Handle<Code> call_code = Handle<Code>(isolate->builtins()->builtin(call));
   Handle<JSFunction> function = prototype.is_null() ?
     factory->NewFunctionWithoutPrototype(symbol, call_code) :
@@ -488,7 +488,8 @@
 
   // Allocate the empty function as the prototype for function ECMAScript
   // 262 15.3.4.
-  Handle<String> symbol = factory->LookupAsciiSymbol("Empty");
+  Handle<String> symbol =
+      factory->LookupOneByteSymbol(STATIC_ASCII_VECTOR("Empty"));
   Handle<JSFunction> empty_function =
       factory->NewFunctionWithoutPrototype(symbol, CLASSIC_MODE);
 
@@ -570,7 +571,8 @@
 // ECMAScript 5th Edition, 13.2.3
 Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() {
   if (throw_type_error_function.is_null()) {
-    Handle<String> name = factory()->LookupAsciiSymbol("ThrowTypeError");
+    Handle<String> name = factory()->LookupOneByteSymbol(
+        STATIC_ASCII_VECTOR("ThrowTypeError"));
     throw_type_error_function =
       factory()->NewFunctionWithoutPrototype(name, CLASSIC_MODE);
     Handle<Code> code(isolate()->builtins()->builtin(
@@ -771,7 +773,8 @@
                                      factory()->OuterGlobalObject);
   }
 
-  Handle<String> global_name = factory()->LookupAsciiSymbol("global");
+  Handle<String> global_name = factory()->LookupOneByteSymbol(
+      STATIC_ASCII_VECTOR("global"));
   global_proxy_function->shared()->set_instance_class_name(*global_name);
   global_proxy_function->initial_map()->set_is_access_check_needed(true);
 
@@ -811,7 +814,8 @@
   static const PropertyAttributes attributes =
       static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
   ForceSetProperty(builtins_global,
-                   factory()->LookupAsciiSymbol("global"),
+                   factory()->LookupOneByteSymbol(
+                       STATIC_ASCII_VECTOR("global")),
                    inner_global,
                    attributes);
   // Set up the reference from the global object to the builtins object.
@@ -1049,7 +1053,8 @@
     // Make sure we can recognize argument objects at runtime.
     // This is done by introducing an anonymous function with
     // class_name equals 'Arguments'.
-    Handle<String> symbol = factory->LookupAsciiSymbol("Arguments");
+    Handle<String> symbol = factory->LookupOneByteSymbol(
+        STATIC_ASCII_VECTOR("Arguments"));
     Handle<Code> code = Handle<Code>(
         isolate->builtins()->builtin(Builtins::kIllegal));
     Handle<JSObject> prototype =
@@ -1207,7 +1212,8 @@
                              code,
                              true);
 
-    Handle<String> name = factory->LookupAsciiSymbol("context_extension");
+    Handle<String> name =
+        factory->LookupOneByteSymbol(STATIC_ASCII_VECTOR("context_extension"));
     context_extension_fun->shared()->set_instance_class_name(*name);
     native_context()->set_context_extension_function(*context_extension_fun);
   }
@@ -1381,11 +1387,12 @@
 }
 
 
-#define INSTALL_NATIVE(Type, name, var)                                       \
-  Handle<String> var##_name = factory()->LookupAsciiSymbol(name);             \
-  Object* var##_native =                                                      \
-      native_context()->builtins()->GetPropertyNoExceptionThrown(             \
-           *var##_name);                                                      \
+#define INSTALL_NATIVE(Type, name, var)                                 \
+  Handle<String> var##_name =                                           \
+    factory()->LookupOneByteSymbol(STATIC_ASCII_VECTOR(name));          \
+  Object* var##_native =                                                \
+      native_context()->builtins()->GetPropertyNoExceptionThrown(       \
+           *var##_name);                                                \
   native_context()->set_##var(Type::cast(var##_native));
 
 
@@ -1439,7 +1446,8 @@
                              JS_BUILTINS_OBJECT_TYPE,
                              JSBuiltinsObject::kSize, code, true);
 
-  Handle<String> name = factory()->LookupAsciiSymbol("builtins");
+  Handle<String> name =
+      factory()->LookupOneByteSymbol(STATIC_ASCII_VECTOR("builtins"));
   builtins_fun->shared()->set_instance_class_name(*name);
   builtins_fun->initial_map()->set_dictionary_map(true);
   builtins_fun->initial_map()->set_prototype(heap()->null_value());
@@ -1458,7 +1466,8 @@
   // global object.
   static const PropertyAttributes attributes =
       static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
-  Handle<String> global_symbol = factory()->LookupAsciiSymbol("global");
+  Handle<String> global_symbol =
+      factory()->LookupOneByteSymbol(STATIC_ASCII_VECTOR("global"));
   Handle<Object> global_obj(native_context()->global_object());
   CHECK_NOT_EMPTY_HANDLE(isolate(),
                          JSObject::SetLocalPropertyIgnoreAttributes(
@@ -1501,41 +1510,49 @@
     Handle<Foreign> script_source(
         factory()->NewForeign(&Accessors::ScriptSource));
     Handle<Foreign> script_name(factory()->NewForeign(&Accessors::ScriptName));
-    Handle<String> id_symbol(factory()->LookupAsciiSymbol("id"));
+    Handle<String> id_symbol(factory()->LookupOneByteSymbol(
+        STATIC_ASCII_VECTOR("id")));
     Handle<Foreign> script_id(factory()->NewForeign(&Accessors::ScriptId));
     Handle<String> line_offset_symbol(
-        factory()->LookupAsciiSymbol("line_offset"));
+        factory()->LookupOneByteSymbol(STATIC_ASCII_VECTOR("line_offset")));
     Handle<Foreign> script_line_offset(
         factory()->NewForeign(&Accessors::ScriptLineOffset));
     Handle<String> column_offset_symbol(
-        factory()->LookupAsciiSymbol("column_offset"));
+        factory()->LookupOneByteSymbol(STATIC_ASCII_VECTOR("column_offset")));
     Handle<Foreign> script_column_offset(
         factory()->NewForeign(&Accessors::ScriptColumnOffset));
-    Handle<String> data_symbol(factory()->LookupAsciiSymbol("data"));
+    Handle<String> data_symbol(factory()->LookupOneByteSymbol(
+        STATIC_ASCII_VECTOR("data")));
     Handle<Foreign> script_data(factory()->NewForeign(&Accessors::ScriptData));
-    Handle<String> type_symbol(factory()->LookupAsciiSymbol("type"));
+    Handle<String> type_symbol(factory()->LookupOneByteSymbol(
+        STATIC_ASCII_VECTOR("type")));
     Handle<Foreign> script_type(factory()->NewForeign(&Accessors::ScriptType));
     Handle<String> compilation_type_symbol(
-        factory()->LookupAsciiSymbol("compilation_type"));
+        factory()->LookupOneByteSymbol(
+            STATIC_ASCII_VECTOR("compilation_type")));
     Handle<Foreign> script_compilation_type(
         factory()->NewForeign(&Accessors::ScriptCompilationType));
-    Handle<String> line_ends_symbol(factory()->LookupAsciiSymbol("line_ends"));
+    Handle<String> line_ends_symbol(factory()->LookupOneByteSymbol(
+        STATIC_ASCII_VECTOR("line_ends")));
     Handle<Foreign> script_line_ends(
         factory()->NewForeign(&Accessors::ScriptLineEnds));
     Handle<String> context_data_symbol(
-        factory()->LookupAsciiSymbol("context_data"));
+        factory()->LookupOneByteSymbol(STATIC_ASCII_VECTOR("context_data")));
     Handle<Foreign> script_context_data(
         factory()->NewForeign(&Accessors::ScriptContextData));
     Handle<String> eval_from_script_symbol(
-        factory()->LookupAsciiSymbol("eval_from_script"));
+        factory()->LookupOneByteSymbol(
+            STATIC_ASCII_VECTOR("eval_from_script")));
     Handle<Foreign> script_eval_from_script(
         factory()->NewForeign(&Accessors::ScriptEvalFromScript));
     Handle<String> eval_from_script_position_symbol(
-        factory()->LookupAsciiSymbol("eval_from_script_position"));
+        factory()->LookupOneByteSymbol(
+            STATIC_ASCII_VECTOR("eval_from_script_position")));
     Handle<Foreign> script_eval_from_script_position(
         factory()->NewForeign(&Accessors::ScriptEvalFromScriptPosition));
     Handle<String> eval_from_function_name_symbol(
-        factory()->LookupAsciiSymbol("eval_from_function_name"));
+        factory()->LookupOneByteSymbol(
+            STATIC_ASCII_VECTOR("eval_from_function_name")));
     Handle<Foreign> script_eval_from_function_name(
         factory()->NewForeign(&Accessors::ScriptEvalFromFunctionName));
     PropertyAttributes attribs =
@@ -1855,13 +1872,13 @@
   const char* period_pos = strchr(holder_expr, '.');
   if (period_pos == NULL) {
     return Handle<JSObject>::cast(
-        GetProperty(global, factory->LookupAsciiSymbol(holder_expr)));
+        GetProperty(global, factory->LookupUtf8Symbol(holder_expr)));
   }
   ASSERT_EQ(".prototype", period_pos);
   Vector<const char> property(holder_expr,
                               static_cast<int>(period_pos - holder_expr));
   Handle<JSFunction> function = Handle<JSFunction>::cast(
-      GetProperty(global, factory->LookupSymbol(property)));
+      GetProperty(global, factory->LookupUtf8Symbol(property)));
   return Handle<JSObject>(JSObject::cast(function->prototype()));
 }
 
@@ -1870,7 +1887,7 @@
                                      const char* function_name,
                                      BuiltinFunctionId id) {
   Factory* factory = holder->GetIsolate()->factory();
-  Handle<String> name = factory->LookupAsciiSymbol(function_name);
+  Handle<String> name = factory->LookupUtf8Symbol(function_name);
   Object* function_object = holder->GetProperty(*name)->ToObjectUnchecked();
   Handle<JSFunction> function(JSFunction::cast(function_object));
   function->shared()->set_function_data(Smi::FromInt(id));
@@ -1961,7 +1978,7 @@
       native_context->global_object()));
   // Expose the natives in global if a name for it is specified.
   if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
-    Handle<String> natives = factory->LookupAsciiSymbol(FLAG_expose_natives_as);
+    Handle<String> natives = factory->LookupUtf8Symbol(FLAG_expose_natives_as);
     CHECK_NOT_EMPTY_HANDLE(isolate,
                            JSObject::SetLocalPropertyIgnoreAttributes(
                                global, natives,
@@ -1971,7 +1988,8 @@
 
   Handle<Object> Error = GetProperty(global, "Error");
   if (Error->IsJSObject()) {
-    Handle<String> name = factory->LookupAsciiSymbol("stackTraceLimit");
+    Handle<String> name =
+        factory->LookupOneByteSymbol(STATIC_ASCII_VECTOR("stackTraceLimit"));
     Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit));
     CHECK_NOT_EMPTY_HANDLE(isolate,
                            JSObject::SetLocalPropertyIgnoreAttributes(
@@ -1993,7 +2011,7 @@
         native_context->security_token());
 
     Handle<String> debug_string =
-        factory->LookupAsciiSymbol(FLAG_expose_debug_as);
+        factory->LookupUtf8Symbol(FLAG_expose_debug_as);
     Handle<Object> global_proxy(debug->debug_context()->global_proxy());
     CHECK_NOT_EMPTY_HANDLE(isolate,
                            JSObject::SetLocalPropertyIgnoreAttributes(
@@ -2137,7 +2155,7 @@
   Factory* factory = builtins->GetIsolate()->factory();
   for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
     Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
-    Handle<String> name = factory->LookupAsciiSymbol(Builtins::GetName(id));
+    Handle<String> name = factory->LookupUtf8Symbol(Builtins::GetName(id));
     Object* function_object = builtins->GetPropertyNoExceptionThrown(*name);
     Handle<JSFunction> function
         = Handle<JSFunction>(JSFunction::cast(function_object));
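
Throughout this file, string literals move from LookupAsciiSymbol(name) to LookupOneByteSymbol(STATIC_ASCII_VECTOR(...)), while strings whose length is only known at runtime (flag values, builtin names) use LookupUtf8Symbol. The point of the macro is that a literal's length is a compile-time constant. A standalone model of the idea (an assumption; the real macro and Vector class live in V8's utility headers):

    template <typename T>
    class Vector {
     public:
      Vector(T* data, int length) : data_(data), length_(length) {}
      T* start() const { return data_; }
      int length() const { return length_; }
     private:
      T* data_;
      int length_;
    };

    // sizeof(x) on a string literal includes the NUL, hence the -1; no
    // strlen() or UTF-8 scan is needed before the symbol-table lookup.
    #define STATIC_ASCII_VECTOR(x) Vector<const char>((x), sizeof(x) - 1)
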
diff --git a/src/builtins.cc b/src/builtins.cc
index d62713d..97fcaeb 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -858,23 +858,6 @@
   }
 
   JSObject* object = JSObject::cast(receiver);
-  ElementsKind kind = object->GetElementsKind();
-
-  if (IsHoleyElementsKind(kind)) {
-    bool packed = true;
-    ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
-    for (int i = 0; i < len; i++) {
-      if (!accessor->HasElement(object, object, i, elms)) {
-        packed = false;
-        break;
-      }
-    }
-    if (packed) {
-      kind = GetPackedElementsKind(kind);
-    } else if (!receiver->IsJSArray()) {
-      return CallJsBuiltin(isolate, "ArraySlice", args);
-    }
-  }
 
   ASSERT(len >= 0);
   int n_arguments = args.length() - 1;
@@ -924,6 +907,23 @@
   // Calculate the length of result array.
   int result_len = Max(final - k, 0);
 
+  ElementsKind kind = object->GetElementsKind();
+  if (IsHoleyElementsKind(kind)) {
+    bool packed = true;
+    ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
+    for (int i = k; i < final; i++) {
+      if (!accessor->HasElement(object, object, i, elms)) {
+        packed = false;
+        break;
+      }
+    }
+    if (packed) {
+      kind = GetPackedElementsKind(kind);
+    } else if (!receiver->IsJSArray()) {
+      return CallJsBuiltin(isolate, "ArraySlice", args);
+    }
+  }
+
   JSArray* result_array;
   MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(kind,
                                                              result_len,
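
The hole scan above moves to after k and final are clamped, so only elements inside the slice window can downgrade the result kind or force the generic path; a hole outside [k, final) no longer matters (e.g. [,1,2].slice(1, 3) can now stay packed). The moved check in isolation, modeled over a plain presence bitmap:

    // Returns whether the slice window [k, final) is free of holes.
    bool SliceWindowIsPacked(const bool* has_element, int k, int final) {
      for (int i = k; i < final; i++) {
        if (!has_element[i]) return false;
      }
      return true;
    }
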
diff --git a/src/builtins.h b/src/builtins.h
index a2f752e..1ca4053 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -107,6 +107,8 @@
                                     Code::kNoExtraICState)              \
   V(NotifyLazyDeoptimized,          BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
+  V(NotifyICMiss,                   BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
   V(NotifyOSR,                      BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
                                                                         \
@@ -386,6 +388,7 @@
   static void Generate_NotifyDeoptimized(MacroAssembler* masm);
   static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
   static void Generate_NotifyOSR(MacroAssembler* masm);
+  static void Generate_NotifyICMiss(MacroAssembler* masm);
   static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
 
   static void Generate_FunctionCall(MacroAssembler* masm);
diff --git a/src/checks.cc b/src/checks.cc
index 320fd6b..a6405ec 100644
--- a/src/checks.cc
+++ b/src/checks.cc
@@ -46,7 +46,8 @@
     va_start(arguments, format);
     i::OS::VPrintError(format, arguments);
     va_end(arguments);
-    i::OS::PrintError("\n#\n\n");
+    i::OS::PrintError("\n#\n");
+    i::OS::DumpBacktrace();
   }
   // First two times we may try to print a stack dump.
   if (fatal_error_handler_nesting_depth < 3) {
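
The extra newline is dropped in favor of OS::DumpBacktrace(), so a fatal error in debug builds ends with a native C++ stack. One plausible shape for it on Linux/glibc (an assumption; the real implementation lives in the platform files), noting that backtrace_symbols() only yields readable names when the binary is linked with -rdynamic:

    #include <execinfo.h>
    #include <cstdio>
    #include <cstdlib>

    static void DumpBacktrace() {
      void* frames[64];
      int depth = backtrace(frames, 64);      // Capture raw return addresses.
      char** symbols = backtrace_symbols(frames, depth);
      if (symbols == NULL) return;            // Allocation failed; stay silent.
      for (int i = 0; i < depth; i++) {
        fprintf(stderr, "%2d: %s\n", i, symbols[i]);
      }
      free(symbols);                          // One malloc'd block holds all strings.
    }
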
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
new file mode 100644
index 0000000..74bd93f
--- /dev/null
+++ b/src/code-stubs-hydrogen.cc
@@ -0,0 +1,135 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "code-stubs.h"
+#include "hydrogen.h"
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+
+Handle<Code> HydrogenCodeStub::CodeFromGraph(HGraph* graph) {
+  graph->OrderBlocks();
+  graph->AssignDominators();
+  graph->CollectPhis();
+  graph->InsertRepresentationChanges();
+  graph->EliminateRedundantBoundsChecks();
+  LChunk* chunk = LChunk::NewChunk(graph);
+  ASSERT(chunk != NULL);
+  Handle<Code> stub = chunk->Codegen(Code::COMPILED_STUB);
+  return stub;
+}
+
+
+class CodeStubGraphBuilderBase : public HGraphBuilder {
+ public:
+  CodeStubGraphBuilderBase(Isolate* isolate, HydrogenCodeStub* stub)
+      : HGraphBuilder(&info_), info_(stub, isolate) {}
+  virtual bool BuildGraph();
+
+ protected:
+  virtual void BuildCodeStub() = 0;
+  HParameter* GetParameter(int parameter) { return parameters_[parameter]; }
+  HydrogenCodeStub* stub() { return info_.code_stub(); }
+
+ private:
+  SmartArrayPointer<HParameter*> parameters_;
+  CompilationInfoWithZone info_;
+};
+
+
+bool CodeStubGraphBuilderBase::BuildGraph() {
+  if (FLAG_trace_hydrogen) {
+    PrintF("-----------------------------------------------------------\n");
+    PrintF("Compiling stub using hydrogen\n");
+    HTracer::Instance()->TraceCompilation(&info_);
+  }
+  HBasicBlock* next_block = graph()->CreateBasicBlock();
+  next_block->SetInitialEnvironment(graph()->start_environment());
+  HGoto* jump = new(zone()) HGoto(next_block);
+  graph()->entry_block()->Finish(jump);
+  set_current_block(next_block);
+
+  int major_key = stub()->MajorKey();
+  CodeStubInterfaceDescriptor* descriptor =
+      info_.isolate()->code_stub_interface_descriptor(major_key);
+  if (descriptor->register_param_count_ < 0) {
+    stub()->InitializeInterfaceDescriptor(info_.isolate(), descriptor);
+  }
+  parameters_.Reset(new HParameter*[descriptor->register_param_count_]);
+
+  HGraph* graph = this->graph();
+  Zone* zone = this->zone();
+  for (int i = 0; i < descriptor->register_param_count_; ++i) {
+    HParameter* param = new(zone) HParameter(i);
+    AddInstruction(param);
+    graph->start_environment()->Push(param);
+    parameters_[i] = param;
+  }
+  AddSimulate(BailoutId::StubEntry());
+
+  BuildCodeStub();
+
+  return true;
+}
+
+template <class Stub>
+class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
+ public:
+  explicit CodeStubGraphBuilder(Stub* stub)
+      : CodeStubGraphBuilderBase(Isolate::Current(), stub) {}
+
+ protected:
+  virtual void BuildCodeStub();
+  Stub* casted_stub() { return static_cast<Stub*>(stub()); }
+};
+
+
+template <>
+void CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
+  Zone* zone = this->zone();
+
+  HInstruction* load = BuildUncheckedMonomorphicElementAccess(
+      GetParameter(0), GetParameter(1), NULL, NULL,
+      casted_stub()->is_js_array(), casted_stub()->elements_kind(), false);
+  AddInstruction(load);
+
+  HReturn* ret = new(zone) HReturn(load);
+  current_block()->Finish(ret);
+}
+
+
+Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
+  CodeStubGraphBuilder<KeyedLoadFastElementStub> builder(this);
+  return CodeFromGraph(builder.CreateGraph());
+}
+
+
+} }  // namespace v8::internal
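
The recipe a further graph-built stub would follow, per the pattern above (MyStub is hypothetical, purely for illustration): specialize BuildCodeStub() with the stub's Hydrogen graph, then route GenerateCode() through the builder.

    template <>
    void CodeStubGraphBuilder<MyStub>::BuildCodeStub() {
      Zone* zone = this->zone();
      HValue* input = GetParameter(0);  // First register parameter.
      current_block()->Finish(new(zone) HReturn(input));
    }

    Handle<Code> MyStub::GenerateCode() {
      CodeStubGraphBuilder<MyStub> builder(this);
      return CodeFromGraph(builder.CreateGraph());
    }
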
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 276c87e..c7d4c80 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -48,20 +48,6 @@
 }
 
 
-void CodeStub::GenerateCode(MacroAssembler* masm) {
-  // Update the static counter each time a new code stub is generated.
-  masm->isolate()->counters()->code_stubs()->Increment();
-
-  // Nested stubs are not allowed for leaves.
-  AllowStubCallsScope allow_scope(masm, false);
-
-  // Generate the code for the stub.
-  masm->set_generating_stub(true);
-  NoCurrentFrameScope scope(masm);
-  Generate(masm);
-}
-
-
 SmartArrayPointer<const char> CodeStub::GetName() {
   char buffer[100];
   NoAllocationStringAllocator allocator(buffer,
@@ -72,8 +58,7 @@
 }
 
 
-void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
-  Isolate* isolate = masm->isolate();
+void CodeStub::RecordCodeGeneration(Code* code, Isolate* isolate) {
   SmartArrayPointer<const char> name = GetName();
   PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name));
   GDBJIT(AddCode(GDBJITInterface::STUB, *name, code));
@@ -87,6 +72,39 @@
 }
 
 
+Handle<Code> PlatformCodeStub::GenerateCode() {
+  Isolate* isolate = Isolate::Current();
+  Factory* factory = isolate->factory();
+
+  // Generate the new code.
+  MacroAssembler masm(isolate, NULL, 256);
+
+  {
+    // Update the static counter each time a new code stub is generated.
+    isolate->counters()->code_stubs()->Increment();
+
+    // Nested stubs are not allowed for leaves.
+    AllowStubCallsScope allow_scope(&masm, false);
+
+    // Generate the code for the stub.
+    masm.set_generating_stub(true);
+    NoCurrentFrameScope scope(&masm);
+    Generate(&masm);
+  }
+
+  // Create the code object.
+  CodeDesc desc;
+  masm.GetCode(&desc);
+
+  // Copy the generated code into a heap object.
+  Code::Flags flags = Code::ComputeFlags(
+      static_cast<Code::Kind>(GetCodeKind()), GetICState());
+  Handle<Code> new_object = factory->NewCode(
+      desc, flags, masm.CodeObject(), NeedsImmovableCode());
+  return new_object;
+}
+
+
 Handle<Code> CodeStub::GetCode() {
   Isolate* isolate = Isolate::Current();
   Factory* factory = isolate->factory();
@@ -102,23 +120,10 @@
   {
     HandleScope scope(isolate);
 
-    // Generate the new code.
-    MacroAssembler masm(isolate, NULL, 256);
-    GenerateCode(&masm);
-
-    // Create the code object.
-    CodeDesc desc;
-    masm.GetCode(&desc);
-
-    // Copy the generated code into a heap object.
-    Code::Flags flags = Code::ComputeFlags(
-        static_cast<Code::Kind>(GetCodeKind()),
-        GetICState());
-    Handle<Code> new_object = factory->NewCode(
-        desc, flags, masm.CodeObject(), NeedsImmovableCode());
+    Handle<Code> new_object = GenerateCode();
     new_object->set_major_key(MajorKey());
     FinishCode(new_object);
-    RecordCodeGeneration(*new_object, &masm);
+    RecordCodeGeneration(*new_object, isolate);
 
 #ifdef ENABLE_DISASSEMBLER
     if (FLAG_print_code_stubs) {
@@ -416,36 +421,8 @@
 }
 
 
-void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
-  switch (elements_kind_) {
-    case FAST_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_SMI_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-      KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
-      break;
-    case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
-      KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm);
-      break;
-    case EXTERNAL_BYTE_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-    case EXTERNAL_SHORT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-    case EXTERNAL_INT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS:
-    case EXTERNAL_DOUBLE_ELEMENTS:
-    case EXTERNAL_PIXEL_ELEMENTS:
-      KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_);
-      break;
-    case DICTIONARY_ELEMENTS:
-      KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
-      break;
-    case NON_STRICT_ARGUMENTS_ELEMENTS:
-      UNREACHABLE();
-      break;
-  }
+void KeyedLoadDictionaryElementStub::Generate(MacroAssembler* masm) {
+  KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
 }
 
 
diff --git a/src/code-stubs.h b/src/code-stubs.h
index ae113f5..527abde 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -162,20 +162,29 @@
   // Lookup the code in the (possibly custom) cache.
   bool FindCodeInCache(Code** code_out, Isolate* isolate);
 
+  // Returns information for computing the number key.
+  virtual Major MajorKey() = 0;
+  virtual int MinorKey() = 0;
+
  protected:
   static bool CanUseFPRegisters();
 
- private:
-  // Nonvirtual wrapper around the stub-specific Generate function.  Call
-  // this function to set up the macro assembler and generate the code.
-  void GenerateCode(MacroAssembler* masm);
-
   // Generates the assembler code for the stub.
-  virtual void Generate(MacroAssembler* masm) = 0;
+  virtual Handle<Code> GenerateCode() = 0;
 
+  // BinaryOpStub needs to override this.
+  virtual InlineCacheState GetICState() {
+    return UNINITIALIZED;
+  }
+
+  // Returns whether the code generated for this stub needs to be allocated as
+  // a fixed (non-moveable) code object.
+  virtual bool NeedsImmovableCode() { return false; }
+
+ private:
   // Perform bookkeeping required after code generation when stub code is
   // initially generated.
-  void RecordCodeGeneration(Code* code, MacroAssembler* masm);
+  void RecordCodeGeneration(Code* code, Isolate* isolate);
 
   // Finish the code object after it has been generated.
   virtual void FinishCode(Handle<Code> code) { }
@@ -184,18 +193,9 @@
   // registering stub in the stub cache.
   virtual void Activate(Code* code) { }
 
-  // Returns information for computing the number key.
-  virtual Major MajorKey() = 0;
-  virtual int MinorKey() = 0;
-
   // BinaryOpStub needs to override this.
   virtual int GetCodeKind();
 
-  // BinaryOpStub needs to override this.
-  virtual InlineCacheState GetICState() {
-    return UNINITIALIZED;
-  }
-
   // Add the code to a specialized cache, specific to an individual
   // stub type. Please note, this method must add the code object to a
   // roots object, otherwise we will remove the code during GC.
@@ -213,10 +213,6 @@
   SmartArrayPointer<const char> GetName();
   virtual void PrintName(StringStream* stream);
 
-  // Returns whether the code generated for this stub needs to be allocated as
-  // a fixed (non-moveable) code object.
-  virtual bool NeedsImmovableCode() { return false; }
-
   // Computes the key based on major and minor.
   uint32_t GetKey() {
     ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS);
@@ -232,6 +228,51 @@
 };
 
 
+class PlatformCodeStub : public CodeStub {
+ public:
+  // Generates and returns the code object for the stub.
+  virtual Handle<Code> GenerateCode();
+
+  virtual int GetCodeKind() { return Code::STUB; }
+
+ protected:
+  // Generates the assembler code for the stub.
+  virtual void Generate(MacroAssembler* masm) = 0;
+};
+
+
+struct CodeStubInterfaceDescriptor {
+  CodeStubInterfaceDescriptor()
+      : register_param_count_(-1),
+        register_params_(NULL) { }
+  int register_param_count_;
+  Register* register_params_;
+  Handle<Code> deoptimization_handler_;
+};
+
+
+class HGraph;
+struct Register;
+class HydrogenCodeStub : public CodeStub {
+ public:
+  // Generates and returns the code object for the stub.
+  virtual Handle<Code> GenerateCode() = 0;
+
+  virtual int GetCodeKind() { return Code::COMPILED_STUB; }
+
+  CodeStubInterfaceDescriptor* GetInterfaceDescriptor(Isolate* isolate) {
+    return isolate->code_stub_interface_descriptor(MajorKey());
+  }
+
+  virtual void InitializeInterfaceDescriptor(
+      Isolate* isolate,
+      CodeStubInterfaceDescriptor* descriptor) = 0;
+
+ protected:
+  Handle<Code> CodeFromGraph(HGraph* graph);
+};
+
+
 // Helper interface to prepare to/restore after making runtime calls.
 class RuntimeCallHelper {
  public:
@@ -289,7 +330,7 @@
 };
 
 
-class StackCheckStub : public CodeStub {
+class StackCheckStub : public PlatformCodeStub {
  public:
   StackCheckStub() { }
 
@@ -301,7 +342,7 @@
 };
 
 
-class InterruptStub : public CodeStub {
+class InterruptStub : public PlatformCodeStub {
  public:
   InterruptStub() { }
 
@@ -313,7 +354,7 @@
 };
 
 
-class ToNumberStub: public CodeStub {
+class ToNumberStub: public PlatformCodeStub {
  public:
   ToNumberStub() { }
 
@@ -325,7 +366,7 @@
 };
 
 
-class FastNewClosureStub : public CodeStub {
+class FastNewClosureStub : public PlatformCodeStub {
  public:
   explicit FastNewClosureStub(LanguageMode language_mode)
     : language_mode_(language_mode) { }
@@ -341,7 +382,7 @@
 };
 
 
-class FastNewContextStub : public CodeStub {
+class FastNewContextStub : public PlatformCodeStub {
  public:
   static const int kMaximumSlots = 64;
 
@@ -359,7 +400,7 @@
 };
 
 
-class FastNewBlockContextStub : public CodeStub {
+class FastNewBlockContextStub : public PlatformCodeStub {
  public:
   static const int kMaximumSlots = 64;
 
@@ -377,7 +418,7 @@
 };
 
 
-class FastCloneShallowArrayStub : public CodeStub {
+class FastCloneShallowArrayStub : public PlatformCodeStub {
  public:
   // Maximum length of copied elements array.
   static const int kMaximumClonedLength = 8;
@@ -410,7 +451,7 @@
 };
 
 
-class FastCloneShallowObjectStub : public CodeStub {
+class FastCloneShallowObjectStub : public PlatformCodeStub {
  public:
   // Maximum number of properties in copied object.
   static const int kMaximumClonedProperties = 6;
@@ -430,7 +471,7 @@
 };
 
 
-class InstanceofStub: public CodeStub {
+class InstanceofStub: public PlatformCodeStub {
  public:
   enum Flags {
     kNoFlags = 0,
@@ -468,7 +509,7 @@
 };
 
 
-class MathPowStub: public CodeStub {
+class MathPowStub: public PlatformCodeStub {
  public:
   enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK};
 
@@ -484,7 +525,7 @@
 };
 
 
-class BinaryOpStub: public CodeStub {
+class BinaryOpStub: public PlatformCodeStub {
  public:
   BinaryOpStub(Token::Value op, OverwriteMode mode)
       : op_(op),
@@ -600,7 +641,7 @@
 };
 
 
-class ICCompareStub: public CodeStub {
+class ICCompareStub: public PlatformCodeStub {
  public:
   ICCompareStub(Token::Value op,
                 CompareIC::State left,
@@ -666,7 +707,7 @@
 };
 
 
-class CEntryStub : public CodeStub {
+class CEntryStub : public PlatformCodeStub {
  public:
   explicit CEntryStub(int result_size,
                       SaveFPRegsMode save_doubles = kDontSaveFPRegs)
@@ -700,7 +741,7 @@
 };
 
 
-class JSEntryStub : public CodeStub {
+class JSEntryStub : public PlatformCodeStub {
  public:
   JSEntryStub() { }
 
@@ -734,7 +775,7 @@
 };
 
 
-class ArgumentsAccessStub: public CodeStub {
+class ArgumentsAccessStub: public PlatformCodeStub {
  public:
   enum Type {
     READ_ELEMENT,
@@ -761,7 +802,7 @@
 };
 
 
-class RegExpExecStub: public CodeStub {
+class RegExpExecStub: public PlatformCodeStub {
  public:
   RegExpExecStub() { }
 
@@ -773,7 +814,7 @@
 };
 
 
-class RegExpConstructResultStub: public CodeStub {
+class RegExpConstructResultStub: public PlatformCodeStub {
  public:
   RegExpConstructResultStub() { }
 
@@ -785,7 +826,7 @@
 };
 
 
-class CallFunctionStub: public CodeStub {
+class CallFunctionStub: public PlatformCodeStub {
  public:
   CallFunctionStub(int argc, CallFunctionFlags flags)
       : argc_(argc), flags_(flags) { }
@@ -826,7 +867,7 @@
 };
 
 
-class CallConstructStub: public CodeStub {
+class CallConstructStub: public PlatformCodeStub {
  public:
   explicit CallConstructStub(CallFunctionFlags flags) : flags_(flags) {}
 
@@ -1017,25 +1058,54 @@
 };
 
 
-class KeyedLoadElementStub : public CodeStub {
+class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
  public:
-  explicit KeyedLoadElementStub(ElementsKind elements_kind)
-      : elements_kind_(elements_kind)
-  { }
+  KeyedLoadDictionaryElementStub() {}
 
   Major MajorKey() { return KeyedLoadElement; }
-  int MinorKey() { return elements_kind_; }
+  int MinorKey() { return DICTIONARY_ELEMENTS; }
 
   void Generate(MacroAssembler* masm);
 
  private:
-  ElementsKind elements_kind_;
-
-  DISALLOW_COPY_AND_ASSIGN(KeyedLoadElementStub);
+  DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub);
 };
 
 
-class KeyedStoreElementStub : public CodeStub {
+class KeyedLoadFastElementStub : public HydrogenCodeStub {
+ public:
+  KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) {
+    bit_field_ = ElementsKindBits::encode(elements_kind) |
+        IsJSArrayBits::encode(is_js_array);
+  }
+
+  Major MajorKey() { return KeyedLoadElement; }
+  int MinorKey() { return bit_field_; }
+
+  bool is_js_array() const {
+    return IsJSArrayBits::decode(bit_field_);
+  }
+
+  ElementsKind elements_kind() const {
+    return ElementsKindBits::decode(bit_field_);
+  }
+
+  virtual Handle<Code> GenerateCode();
+
+  virtual void InitializeInterfaceDescriptor(
+      Isolate* isolate,
+      CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+  class IsJSArrayBits: public BitField<bool, 8, 1> {};
+  class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
+  uint32_t bit_field_;
+
+  DISALLOW_COPY_AND_ASSIGN(KeyedLoadFastElementStub);
+};
+
+
+class KeyedStoreElementStub : public PlatformCodeStub {
  public:
   KeyedStoreElementStub(bool is_js_array,
                         ElementsKind elements_kind,
@@ -1070,7 +1140,7 @@
 };
 
 
-class ToBooleanStub: public CodeStub {
+class ToBooleanStub: public PlatformCodeStub {
  public:
   enum Type {
     UNDEFINED,
@@ -1140,7 +1210,7 @@
 };
 
 
-class ElementsTransitionAndStoreStub : public CodeStub {
+class ElementsTransitionAndStoreStub : public PlatformCodeStub {
  public:
   ElementsTransitionAndStoreStub(ElementsKind from,
                                  ElementsKind to,
@@ -1181,7 +1251,7 @@
 };
 
 
-class StoreArrayLiteralElementStub : public CodeStub {
+class StoreArrayLiteralElementStub : public PlatformCodeStub {
  public:
   StoreArrayLiteralElementStub()
         : fp_registers_(CanUseFPRegisters()) { }
@@ -1200,7 +1270,7 @@
 };
 
 
-class ProfileEntryHookStub : public CodeStub {
+class ProfileEntryHookStub : public PlatformCodeStub {
  public:
   explicit ProfileEntryHookStub() {}
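
Closing out the new hierarchy in this header: a sketch of how a platform would fill in a CodeStubInterfaceDescriptor for the Hydrogen stub above (the register assignment and miss handler here are assumptions for illustration; the real values belong in the per-architecture code-stubs files):

    void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
        Isolate* isolate,
        CodeStubInterfaceDescriptor* descriptor) {
      // Hypothetical ARM convention: receiver in r1, key in r0.
      static Register registers[] = { r1, r0 };
      descriptor->register_param_count_ = 2;
      descriptor->register_params_ = registers;
      descriptor->deoptimization_handler_ =
          isolate->builtins()->KeyedLoadIC_Miss();
    }
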
 
diff --git a/src/codegen.cc b/src/codegen.cc
index 83ac854..7112f36 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -76,7 +76,13 @@
 
   if (FLAG_trace_codegen || print_source || print_ast) {
     PrintF("*** Generate code for %s function: ", ftype);
-    info->function()->name()->ShortPrint();
+    if (info->IsStub()) {
+      const char* name =
+          CodeStub::MajorName(info->code_stub()->MajorKey(), true);
+      PrintF("%s", name == NULL ? "<unknown>" : name);
+    } else {
+      info->function()->name()->ShortPrint();
+    }
     PrintF(" ***\n");
   }
 
@@ -121,19 +127,21 @@
   if (print_code) {
     // Print the source code if available.
     FunctionLiteral* function = info->function();
-    Handle<Script> script = info->script();
-    if (!script->IsUndefined() && !script->source()->IsUndefined()) {
-      PrintF("--- Raw source ---\n");
-      StringInputBuffer stream(String::cast(script->source()));
-      stream.Seek(function->start_position());
-      // fun->end_position() points to the last character in the stream. We
-      // need to compensate by adding one to calculate the length.
-      int source_len =
-          function->end_position() - function->start_position() + 1;
-      for (int i = 0; i < source_len; i++) {
-        if (stream.has_more()) PrintF("%c", stream.GetNext());
+    if (code->kind() != Code::COMPILED_STUB) {
+      Handle<Script> script = info->script();
+      if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+        PrintF("--- Raw source ---\n");
+        StringInputBuffer stream(String::cast(script->source()));
+        stream.Seek(function->start_position());
+        // fun->end_position() points to the last character in the stream. We
+        // need to compensate by adding one to calculate the length.
+        int source_len =
+            function->end_position() - function->start_position() + 1;
+        for (int i = 0; i < source_len; i++) {
+          if (stream.has_more()) PrintF("%c", stream.GetNext());
+        }
+        PrintF("\n\n");
       }
-      PrintF("\n\n");
     }
     if (info->IsOptimizing()) {
       if (FLAG_print_unopt_code) {
@@ -145,7 +153,12 @@
     } else {
       PrintF("--- Code ---\n");
     }
-    code->Disassemble(*function->debug_name()->ToCString());
+    if (info->IsStub()) {
+      CodeStub::Major major_key = info->code_stub()->MajorKey();
+      code->Disassemble(CodeStub::MajorName(major_key, false));
+    } else {
+      code->Disassemble(*function->debug_name()->ToCString());
+    }
   }
 #endif  // ENABLE_DISASSEMBLER
 }
diff --git a/src/compiler.cc b/src/compiler.cc
index 5779aae..ceac829 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -55,7 +55,7 @@
     : flags_(LanguageModeField::encode(CLASSIC_MODE)),
       script_(script),
       osr_ast_id_(BailoutId::None()) {
-  Initialize(zone);
+  Initialize(script->GetIsolate(), BASE, zone);
 }
 
 
@@ -65,7 +65,7 @@
       shared_info_(shared_info),
       script_(Handle<Script>(Script::cast(shared_info->script()))),
       osr_ast_id_(BailoutId::None()) {
-  Initialize(zone);
+  Initialize(script_->GetIsolate(), BASE, zone);
 }
 
 
@@ -76,12 +76,22 @@
       script_(Handle<Script>(Script::cast(shared_info_->script()))),
       context_(closure->context()),
       osr_ast_id_(BailoutId::None()) {
-  Initialize(zone);
+  Initialize(script_->GetIsolate(), BASE, zone);
 }
 
 
-void CompilationInfo::Initialize(Zone* zone) {
-  isolate_ = script_->GetIsolate();
+CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
+                                 Isolate* isolate, Zone* zone)
+    : flags_(LanguageModeField::encode(CLASSIC_MODE) |
+             IsLazy::encode(true)),
+      osr_ast_id_(BailoutId::None()) {
+  Initialize(isolate, STUB, zone);
+  code_stub_ = stub;
+}
+
+
+void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) {
+  isolate_ = isolate;
   function_ = NULL;
   scope_ = NULL;
   global_scope_ = NULL;
@@ -89,8 +99,13 @@
   pre_parse_data_ = NULL;
   zone_ = zone;
   deferred_handles_ = NULL;
+  code_stub_ = NULL;
   prologue_offset_ = kPrologueOffsetNotSet;
-  mode_ = V8::UseCrankshaft() ? BASE : NONOPT;
+  if (mode == STUB) {
+    mode_ = STUB;
+    return;
+  }
+  mode_ = V8::UseCrankshaft() ? mode : NONOPT;
   if (script_->type()->value() == Script::TYPE_NATIVE) {
     MarkAsNative();
   }
@@ -107,6 +122,33 @@
 }
 
 
+int CompilationInfo::num_parameters() const {
+  if (IsStub()) {
+    return 0;
+  } else {
+    return scope()->num_parameters();
+  }
+}
+
+
+int CompilationInfo::num_heap_slots() const {
+  if (IsStub()) {
+    return 0;
+  } else {
+    return scope()->num_heap_slots();
+  }
+}
+
+
+Code::Flags CompilationInfo::flags() const {
+  if (IsStub()) {
+    return Code::ComputeFlags(Code::COMPILED_STUB);
+  } else {
+    return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
+  }
+}
+
+
 // Disable optimization for the rest of the compilation pipeline.
 void CompilationInfo::DisableOptimization() {
   bool is_optimizable_closure =
@@ -317,13 +359,13 @@
   if (FLAG_trace_hydrogen) {
     PrintF("-----------------------------------------------------------\n");
     PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
-    HTracer::Instance()->TraceCompilation(info()->function());
+    HTracer::Instance()->TraceCompilation(info());
   }
   Handle<Context> native_context(
       info()->closure()->context()->native_context());
   oracle_ = new(info()->zone()) TypeFeedbackOracle(
       code, native_context, info()->isolate(), info()->zone());
-  graph_builder_ = new(info()->zone()) HGraphBuilder(info(), oracle_);
+  graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info(), oracle_);
 
   Timer t(this, &time_taken_to_create_graph_);
   graph_ = graph_builder_->CreateGraph();
@@ -376,7 +418,7 @@
     Timer timer(this, &time_taken_to_codegen_);
     ASSERT(chunk_ != NULL);
     ASSERT(graph_ != NULL);
-    Handle<Code> optimized_code = chunk_->Codegen();
+    Handle<Code> optimized_code = chunk_->Codegen(Code::OPTIMIZED_FUNCTION);
     if (optimized_code.is_null()) {
       info()->set_bailout_reason("code generation failed");
       return AbortOptimization();
diff --git a/src/compiler.h b/src/compiler.h
index 653d5f1..6d374d9 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -38,6 +38,7 @@
 static const int kPrologueOffsetNotSet = -1;
 
 class ScriptDataImpl;
+class HydrogenCodeStub;
 
 // CompilationInfo encapsulates some information known at compile time.  It
 // is constructed based on the resources available at compile-time.
@@ -46,6 +47,7 @@
   CompilationInfo(Handle<Script> script, Zone* zone);
   CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
   CompilationInfo(Handle<JSFunction> closure, Zone* zone);
+  CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate, Zone* zone);
 
   virtual ~CompilationInfo();
 
@@ -72,10 +74,14 @@
   Handle<JSFunction> closure() const { return closure_; }
   Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
   Handle<Script> script() const { return script_; }
+  HydrogenCodeStub* code_stub() { return code_stub_; }
   v8::Extension* extension() const { return extension_; }
   ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
   Handle<Context> context() const { return context_; }
   BailoutId osr_ast_id() const { return osr_ast_id_; }
+  int num_parameters() const;
+  int num_heap_slots() const;
+  Code::Flags flags() const;
 
   void MarkAsEval() {
     ASSERT(!is_lazy());
@@ -98,9 +104,31 @@
   void MarkAsNative() {
     flags_ |= IsNative::encode(true);
   }
+
   bool is_native() const {
     return IsNative::decode(flags_);
   }
+
+  bool is_calling() const {
+    return is_deferred_calling() || is_non_deferred_calling();
+  }
+
+  void MarkAsDeferredCalling() {
+    flags_ |= IsDeferredCalling::encode(true);
+  }
+
+  bool is_deferred_calling() const {
+    return IsDeferredCalling::decode(flags_);
+  }
+
+  void MarkAsNonDeferredCalling() {
+    flags_ |= IsNonDeferredCalling::encode(true);
+  }
+
+  bool is_non_deferred_calling() const {
+    return IsNonDeferredCalling::decode(flags_);
+  }
+
   void SetFunction(FunctionLiteral* literal) {
     ASSERT(function_ == NULL);
     function_ = literal;
@@ -151,6 +179,7 @@
   // Accessors for the different compilation modes.
   bool IsOptimizing() const { return mode_ == OPTIMIZE; }
   bool IsOptimizable() const { return mode_ == BASE; }
+  bool IsStub() const { return mode_ == STUB; }
   void SetOptimizing(BailoutId osr_ast_id) {
     SetMode(OPTIMIZE);
     osr_ast_id_ = osr_ast_id;
@@ -209,10 +238,11 @@
   enum Mode {
     BASE,
     OPTIMIZE,
-    NONOPT
+    NONOPT,
+    STUB
   };
 
-  void Initialize(Zone* zone);
+  void Initialize(Isolate* isolate, Mode mode, Zone* zone);
 
   void SetMode(Mode mode) {
     ASSERT(V8::UseCrankshaft());
@@ -238,6 +268,12 @@
   // If compiling for debugging produce just full code matching the
   // initial mode setting.
   class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
+  // If the compiled code contains calls that require building a frame.
+  class IsCalling: public BitField<bool, 9, 1> {};
+  // If the compiled code contains deferred calls that require building a
+  // frame.
+  class IsDeferredCalling: public BitField<bool, 10, 1> {};
+  // If the compiled code contains non-deferred calls that require building
+  // a frame.
+  class IsNonDeferredCalling: public BitField<bool, 11, 1> {};
 
 
   unsigned flags_;
@@ -250,6 +286,8 @@
   Scope* scope_;
   // The global scope provided as a convenience.
   Scope* global_scope_;
+  // For compiled stubs, the stub object
+  HydrogenCodeStub* code_stub_;
   // The compiled code.
   Handle<Code> code_;
 
@@ -310,6 +348,10 @@
       : CompilationInfo(closure, &zone_),
         zone_(closure->GetIsolate()),
         zone_scope_(&zone_, DELETE_ON_EXIT) {}
+  explicit CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
+      : CompilationInfo(stub, isolate, &zone_),
+        zone_(isolate),
+        zone_scope_(&zone_, DELETE_ON_EXIT) {}
 
  private:
   Zone zone_;
@@ -335,7 +377,7 @@
 
 
 class HGraph;
-class HGraphBuilder;
+class HOptimizedGraphBuilder;
 class LChunk;
 
 // A helper class that calls the three compilation phases in
@@ -377,7 +419,7 @@
  private:
   CompilationInfo* info_;
   TypeFeedbackOracle* oracle_;
-  HGraphBuilder* graph_builder_;
+  HOptimizedGraphBuilder* graph_builder_;
   HGraph* graph_;
   LChunk* chunk_;
   int64_t time_taken_to_create_graph_;
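
The IsDeferredCalling and IsNonDeferredCalling bits added above pack into CompilationInfo's single flags_ word through V8's BitField template. A minimal stand-alone sketch of that encode/decode pattern, assuming a simplified BitField that is not V8's exact implementation:

#include <cassert>

// Simplified stand-in for V8's BitField<type, shift, size> template.
template <class T, int shift, int size>
class BitField {
 public:
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << shift;
  }
  static T decode(unsigned value) {
    return static_cast<T>((value >> shift) & ((1U << size) - 1));
  }
};

// Same bit positions as the patch.
class IsDeferredCalling : public BitField<bool, 10, 1> {};
class IsNonDeferredCalling : public BitField<bool, 11, 1> {};

int main() {
  unsigned flags = 0;
  flags |= IsDeferredCalling::encode(true);      // MarkAsDeferredCalling()
  assert(IsDeferredCalling::decode(flags));      // is_deferred_calling()
  assert(!IsNonDeferredCalling::decode(flags));  // other bits stay clear
  return 0;
}

Each accessor pair in CompilationInfo is just this encode-or-decode against flags_, so adding a flag costs one BitField declaration and no extra storage.
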
diff --git a/src/debug-agent.cc b/src/debug-agent.cc
index e856222..811c00e 100644
--- a/src/debug-agent.cc
+++ b/src/debug-agent.cc
@@ -192,21 +192,14 @@
     }
 
     // Convert UTF-8 to UTF-16.
-    unibrow::Utf8InputBuffer<> buf(msg, StrLength(msg));
-    int len = 0;
-    while (buf.has_more()) {
-      buf.GetNext();
-      len++;
-    }
-    ScopedVector<int16_t> temp(len + 1);
-    buf.Reset(msg, StrLength(msg));
-    for (int i = 0; i < len; i++) {
-      temp[i] = buf.GetNext();
-    }
+    unibrow::Utf8Decoder<128> decoder(msg, StrLength(msg));
+    int utf16_length = decoder.Utf16Length();
+    ScopedVector<uint16_t> temp(utf16_length + 1);
+    decoder.WriteUtf16(temp.start(), utf16_length);
 
     // Send the request received to the debugger.
-    v8::Debug::SendCommand(reinterpret_cast<const uint16_t *>(temp.start()),
-                           len,
+    v8::Debug::SendCommand(temp.start(),
+                           utf16_length,
                            NULL,
                            reinterpret_cast<v8::Isolate*>(agent_->isolate()));
 
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index ea1a17d..eef12a9 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -2143,16 +2143,14 @@
     additional_context_object = {};
     for (var i = 0; i < additional_context.length; i++) {
       var mapping = additional_context[i];
-      if (!IS_STRING(mapping.name) || !IS_NUMBER(mapping.handle)) {
+
+      if (!IS_STRING(mapping.name)) {
         return response.failed("Context element #" + i +
-            " must contain name:string and handle:number");
+            " doesn't contain name:string property");
       }
-      var context_value_mirror = LookupMirror(mapping.handle);
-      if (!context_value_mirror) {
-        return response.failed("Context object '" + mapping.name +
-            "' #" + mapping.handle + "# not found");
-      }
-      additional_context_object[mapping.name] = context_value_mirror.value();
+
+      var raw_value = DebugCommandProcessor.resolveValue_(mapping);
+      additional_context_object[mapping.name] = raw_value;
     }
   }
 
diff --git a/src/debug.cc b/src/debug.cc
index ea1c084..17897dd 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -836,7 +836,8 @@
   isolate_->set_context(*context);
 
   // Expose the builtins object in the debugger context.
-  Handle<String> key = isolate_->factory()->LookupAsciiSymbol("builtins");
+  Handle<String> key = isolate_->factory()->LookupOneByteSymbol(
+      STATIC_ASCII_VECTOR("builtins"));
   Handle<GlobalObject> global = Handle<GlobalObject>(context->global_object());
   RETURN_IF_EMPTY_HANDLE_VALUE(
       isolate_,
@@ -1100,7 +1101,8 @@
 
   // Get the function IsBreakPointTriggered (defined in debug-debugger.js).
   Handle<String> is_break_point_triggered_symbol =
-      factory->LookupAsciiSymbol("IsBreakPointTriggered");
+      factory->LookupOneByteSymbol(
+          STATIC_ASCII_VECTOR("IsBreakPointTriggered"));
   Handle<JSFunction> check_break_point =
     Handle<JSFunction>(JSFunction::cast(
         debug_context()->global_object()->GetPropertyNoExceptionThrown(
@@ -2425,8 +2427,8 @@
   ASSERT(isolate_->context() == *Debug::debug_context());
 
   // Clear the mirror cache.
-  Handle<String> function_name =
-      isolate_->factory()->LookupSymbol(CStrVector("ClearMirrorCache"));
+  Handle<String> function_name = isolate_->factory()->LookupOneByteSymbol(
+      STATIC_ASCII_VECTOR("ClearMirrorCache"));
   Handle<Object> fun(
       Isolate::Current()->global_object()->GetPropertyNoExceptionThrown(
       *function_name));
@@ -2554,7 +2556,7 @@
 
   // Create the execution state object.
   Handle<String> constructor_str =
-      isolate_->factory()->LookupSymbol(constructor_name);
+      isolate_->factory()->LookupUtf8Symbol(constructor_name);
   Handle<Object> constructor(
       isolate_->global_object()->GetPropertyNoExceptionThrown(
           *constructor_str));
@@ -2785,7 +2787,8 @@
 
   // Get the function UpdateScriptBreakPoints (defined in debug-debugger.js).
   Handle<String> update_script_break_points_symbol =
-      isolate_->factory()->LookupAsciiSymbol("UpdateScriptBreakPoints");
+      isolate_->factory()->LookupOneByteSymbol(
+          STATIC_ASCII_VECTOR("UpdateScriptBreakPoints"));
   Handle<Object> update_script_break_points =
       Handle<Object>(debug->debug_context()->global_object()->
           GetPropertyNoExceptionThrown(*update_script_break_points_symbol));
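
The debug.cc call sites above move from LookupAsciiSymbol("...") to LookupOneByteSymbol(STATIC_ASCII_VECTOR("...")), trading a runtime length scan for a length taken from the literal's type. A hedged stand-alone analogue of what such a macro can expand to; CharVector and StaticVector are illustrative names, not V8's:

#include <cassert>

// Illustrative stand-in for V8's Vector<const char>.
struct CharVector {
  const char* start;
  int length;
};

// The length comes from the literal's array type at compile time, so no
// strlen() call is needed; N includes the trailing '\0', which we exclude.
template <int N>
static CharVector StaticVector(const char (&literal)[N]) {
  CharVector v = { literal, N - 1 };
  return v;
}

int main() {
  CharVector v = StaticVector("builtins");
  assert(v.length == 8);
  return 0;
}
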
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 8f17111..a0b50ed 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -79,6 +79,36 @@
 #endif
 
 
+Code* DeoptimizerData::FindDeoptimizingCode(Address addr) {
+  for (DeoptimizingCodeListNode* node = deoptimizing_code_list_;
+       node != NULL;
+       node = node->next()) {
+    if (node->code()->contains(addr)) return *node->code();
+  }
+  return NULL;
+}
+
+
+void DeoptimizerData::RemoveDeoptimizingCode(Code* code) {
+  for (DeoptimizingCodeListNode *prev = NULL, *cur = deoptimizing_code_list_;
+       cur != NULL;
+       prev = cur, cur = cur->next()) {
+    if (*cur->code() == code) {
+      if (prev == NULL) {
+        deoptimizing_code_list_ = cur->next();
+      } else {
+        prev->set_next(cur->next());
+      }
+      delete cur;
+      return;
+    }
+  }
+  // Deoptimizing code is removed through a weak callback. Each object is
+  // expected to be removed once and only once.
+  UNREACHABLE();
+}
+
+
 // We rely on this function not causing a GC.  It is called from generated code
 // without having a real stack frame in place.
 Deoptimizer* Deoptimizer::New(JSFunction* function,
@@ -247,45 +277,6 @@
 }
 
 
-class DeoptimizingVisitor : public OptimizedFunctionVisitor {
- public:
-  virtual void EnterContext(Context* context) {
-    if (FLAG_trace_deopt) {
-      PrintF("[deoptimize context: %" V8PRIxPTR "]\n",
-             reinterpret_cast<intptr_t>(context));
-    }
-  }
-
-  virtual void VisitFunction(JSFunction* function) {
-    Deoptimizer::DeoptimizeFunction(function);
-  }
-
-  virtual void LeaveContext(Context* context) {
-    context->ClearOptimizedFunctions();
-  }
-};
-
-
-void Deoptimizer::DeoptimizeAll() {
-  AssertNoAllocation no_allocation;
-
-  if (FLAG_trace_deopt) {
-    PrintF("[deoptimize all contexts]\n");
-  }
-
-  DeoptimizingVisitor visitor;
-  VisitAllOptimizedFunctions(&visitor);
-}
-
-
-void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
-  AssertNoAllocation no_allocation;
-
-  DeoptimizingVisitor visitor;
-  VisitAllOptimizedFunctionsForGlobalObject(object, &visitor);
-}
-
-
 void Deoptimizer::VisitAllOptimizedFunctionsForContext(
     Context* context, OptimizedFunctionVisitor* visitor) {
   Isolate* isolate = context->GetIsolate();
@@ -315,22 +306,6 @@
 }
 
 
-void Deoptimizer::VisitAllOptimizedFunctionsForGlobalObject(
-    JSObject* object, OptimizedFunctionVisitor* visitor) {
-  AssertNoAllocation no_allocation;
-
-  if (object->IsJSGlobalProxy()) {
-    Object* proto = object->GetPrototype();
-    ASSERT(proto->IsJSGlobalObject());
-    VisitAllOptimizedFunctionsForContext(
-        GlobalObject::cast(proto)->native_context(), visitor);
-  } else if (object->IsGlobalObject()) {
-    VisitAllOptimizedFunctionsForContext(
-        GlobalObject::cast(object)->native_context(), visitor);
-  }
-}
-
-
 void Deoptimizer::VisitAllOptimizedFunctions(
     OptimizedFunctionVisitor* visitor) {
   AssertNoAllocation no_allocation;
@@ -338,28 +313,160 @@
   // Run through the list of all native contexts and deoptimize.
   Object* context = Isolate::Current()->heap()->native_contexts_list();
   while (!context->IsUndefined()) {
-    // GC can happen when the context is not fully initialized,
-    // so the global field of the context can be undefined.
-    Object* global = Context::cast(context)->get(Context::GLOBAL_OBJECT_INDEX);
-    if (!global->IsUndefined()) {
-      VisitAllOptimizedFunctionsForGlobalObject(JSObject::cast(global),
-                                                visitor);
-    }
+    VisitAllOptimizedFunctionsForContext(Context::cast(context), visitor);
     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
   }
 }
 
 
-void Deoptimizer::HandleWeakDeoptimizedCode(
-    v8::Persistent<v8::Value> obj, void* data) {
+// Removes the functions selected by the given filter from the optimized
+// function list of the given context and partitions the removed functions
+// into one or more lists such that all functions in a list share the same
+// code. The head of each list is written in the deoptimizing_functions field
+// of the corresponding code object.
+// The found code objects are returned in the given zone list.
+static void PartitionOptimizedFunctions(Context* context,
+                                        OptimizedFunctionFilter* filter,
+                                        ZoneList<Code*>* partitions,
+                                        Zone* zone,
+                                        Object* undefined) {
+  AssertNoAllocation no_allocation;
+  Object* current = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
+  Object* remainder_head = undefined;
+  Object* remainder_tail = undefined;
+  ASSERT_EQ(0, partitions->length());
+  while (current != undefined) {
+    JSFunction* function = JSFunction::cast(current);
+    current = function->next_function_link();
+    if (filter->TakeFunction(function)) {
+      Code* code = function->code();
+      if (code->deoptimizing_functions() == undefined) {
+        partitions->Add(code, zone);
+      } else {
+        ASSERT(partitions->Contains(code));
+      }
+      function->set_next_function_link(code->deoptimizing_functions());
+      code->set_deoptimizing_functions(function);
+    } else {
+      if (remainder_head == undefined) {
+        remainder_head = function;
+      } else {
+        JSFunction::cast(remainder_tail)->set_next_function_link(function);
+      }
+      remainder_tail = function;
+    }
+  }
+  if (remainder_tail != undefined) {
+    JSFunction::cast(remainder_tail)->set_next_function_link(undefined);
+  }
+  context->set(Context::OPTIMIZED_FUNCTIONS_LIST, remainder_head);
+}
+
+
+class DeoptimizeAllFilter : public OptimizedFunctionFilter {
+ public:
+  virtual bool TakeFunction(JSFunction* function) {
+    return true;
+  }
+};
+
+
+class DeoptimizeWithMatchingCodeFilter : public OptimizedFunctionFilter {
+ public:
+  explicit DeoptimizeWithMatchingCodeFilter(Code* code) : code_(code) {}
+  virtual bool TakeFunction(JSFunction* function) {
+    return function->code() == code_;
+  }
+ private:
+  Code* code_;
+};
+
+
+void Deoptimizer::DeoptimizeAll() {
+  AssertNoAllocation no_allocation;
+
+  if (FLAG_trace_deopt) {
+    PrintF("[deoptimize all contexts]\n");
+  }
+
+  DeoptimizeAllFilter filter;
+  DeoptimizeAllFunctionsWith(&filter);
+}
+
+
+void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
+  AssertNoAllocation no_allocation;
+  DeoptimizeAllFilter filter;
+  if (object->IsJSGlobalProxy()) {
+    Object* proto = object->GetPrototype();
+    ASSERT(proto->IsJSGlobalObject());
+    DeoptimizeAllFunctionsForContext(
+        GlobalObject::cast(proto)->native_context(), &filter);
+  } else if (object->IsGlobalObject()) {
+    DeoptimizeAllFunctionsForContext(
+        GlobalObject::cast(object)->native_context(), &filter);
+  }
+}
+
+
+void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+  if (!function->IsOptimized()) return;
+  Code* code = function->code();
+  Context* context = function->context()->native_context();
+  Isolate* isolate = context->GetIsolate();
+  Object* undefined = isolate->heap()->undefined_value();
+  Zone* zone = isolate->runtime_zone();
+  ZoneScope zone_scope(zone, DELETE_ON_EXIT);
+  ZoneList<Code*> codes(1, zone);
+  DeoptimizeWithMatchingCodeFilter filter(code);
+  PartitionOptimizedFunctions(context, &filter, &codes, zone, undefined);
+  ASSERT_EQ(1, codes.length());
+  DeoptimizeFunctionWithPreparedFunctionList(
+      JSFunction::cast(codes.at(0)->deoptimizing_functions()));
+  codes.at(0)->set_deoptimizing_functions(undefined);
+}
+
+
+void Deoptimizer::DeoptimizeAllFunctionsForContext(
+    Context* context, OptimizedFunctionFilter* filter) {
+  ASSERT(context->IsNativeContext());
+  Isolate* isolate = context->GetIsolate();
+  Object* undefined = isolate->heap()->undefined_value();
+  Zone* zone = isolate->runtime_zone();
+  ZoneScope zone_scope(zone, DELETE_ON_EXIT);
+  ZoneList<Code*> codes(1, zone);
+  PartitionOptimizedFunctions(context, filter, &codes, zone, undefined);
+  for (int i = 0; i < codes.length(); ++i) {
+    DeoptimizeFunctionWithPreparedFunctionList(
+        JSFunction::cast(codes.at(i)->deoptimizing_functions()));
+    codes.at(i)->set_deoptimizing_functions(undefined);
+  }
+}
+
+
+void Deoptimizer::DeoptimizeAllFunctionsWith(OptimizedFunctionFilter* filter) {
+  AssertNoAllocation no_allocation;
+
+  // Run through the list of all native contexts and deoptimize.
+  Object* context = Isolate::Current()->heap()->native_contexts_list();
+  while (!context->IsUndefined()) {
+    DeoptimizeAllFunctionsForContext(Context::cast(context), filter);
+    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+  }
+}
+
+
+void Deoptimizer::HandleWeakDeoptimizedCode(v8::Persistent<v8::Value> obj,
+                                            void* parameter) {
   DeoptimizingCodeListNode* node =
-      reinterpret_cast<DeoptimizingCodeListNode*>(data);
-  RemoveDeoptimizingCode(*node->code());
+      reinterpret_cast<DeoptimizingCodeListNode*>(parameter);
+  DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
+  data->RemoveDeoptimizingCode(*node->code());
 #ifdef DEBUG
-  node = Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
-  while (node != NULL) {
-    ASSERT(node != reinterpret_cast<DeoptimizingCodeListNode*>(data));
-    node = node->next();
+  for (DeoptimizingCodeListNode* current = data->deoptimizing_code_list_;
+       current != NULL;
+       current = current->next()) {
+    ASSERT(current != node);
   }
 #endif
 }
@@ -370,6 +477,35 @@
 }
 
 
+bool Deoptimizer::TraceEnabledFor(BailoutType type) {
+  switch (type) {
+    case EAGER:
+    case LAZY:
+    case DEBUGGER:
+      return FLAG_trace_deopt;
+    case OSR:
+      return FLAG_trace_osr;
+  }
+  UNREACHABLE();
+  return false;
+}
+
+
+const char* Deoptimizer::MessageFor(BailoutType type) {
+  switch (type) {
+    case EAGER:
+    case LAZY:
+      return "DEOPT";
+    case DEBUGGER:
+      return "DEOPT FOR DEBUGGER";
+    case OSR:
+      return "OSR";
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
 Deoptimizer::Deoptimizer(Isolate* isolate,
                          JSFunction* function,
                          BailoutType type,
@@ -391,64 +527,16 @@
       deferred_arguments_objects_values_(0),
       deferred_arguments_objects_(0),
       deferred_heap_numbers_(0) {
-  if (FLAG_trace_deopt && type != OSR) {
-    if (type == DEBUGGER) {
-      PrintF("**** DEOPT FOR DEBUGGER: ");
-    } else {
-      PrintF("**** DEOPT: ");
-    }
-    function->PrintName();
-    PrintF(" at bailout #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
-           bailout_id,
-           reinterpret_cast<intptr_t>(from),
-           fp_to_sp_delta - (2 * kPointerSize));
-  } else if (FLAG_trace_osr && type == OSR) {
-    PrintF("**** OSR: ");
-    function->PrintName();
-    PrintF(" at ast id #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
-           bailout_id,
-           reinterpret_cast<intptr_t>(from),
-           fp_to_sp_delta - (2 * kPointerSize));
+  // For COMPILED_STUBs called from builtins, the function pointer is a SMI
+  // indicating an internal frame.
+  if (function->IsSmi()) {
+    function = NULL;
   }
-  function->shared()->increment_deopt_count();
-  // Find the optimized code.
-  if (type == EAGER) {
-    ASSERT(from == NULL);
-    optimized_code_ = function_->code();
-    if (FLAG_trace_deopt && FLAG_code_comments) {
-      // Print instruction associated with this bailout.
-      const char* last_comment = NULL;
-      int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
-          | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-      for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) {
-        RelocInfo* info = it.rinfo();
-        if (info->rmode() == RelocInfo::COMMENT) {
-          last_comment = reinterpret_cast<const char*>(info->data());
-        }
-        if (info->rmode() == RelocInfo::RUNTIME_ENTRY) {
-          unsigned id = Deoptimizer::GetDeoptimizationId(
-              info->target_address(), Deoptimizer::EAGER);
-          if (id == bailout_id && last_comment != NULL) {
-            PrintF("            %s\n", last_comment);
-            break;
-          }
-        }
-      }
-    }
-  } else if (type == LAZY) {
-    optimized_code_ = FindDeoptimizingCodeFromAddress(from);
-    ASSERT(optimized_code_ != NULL);
-  } else if (type == OSR) {
-    // The function has already been optimized and we're transitioning
-    // from the unoptimized shared version to the optimized one in the
-    // function. The return address (from) points to unoptimized code.
-    optimized_code_ = function_->code();
-    ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
-    ASSERT(!optimized_code_->contains(from));
-  } else if (type == DEBUGGER) {
-    optimized_code_ = optimized_code;
-    ASSERT(optimized_code_->contains(from));
+  if (function != NULL && function->IsOptimized()) {
+    function->shared()->increment_deopt_count();
   }
+  compiled_code_ = FindOptimizedCode(function, optimized_code);
+  if (TraceEnabledFor(type)) Trace();
   ASSERT(HEAP->allow_allocation(false));
   unsigned size = ComputeInputFrameSize();
   input_ = new(size) FrameDescription(size, function);
@@ -456,6 +544,57 @@
 }
 
 
+Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
+                                     Code* optimized_code) {
+  switch (bailout_type_) {
+    case Deoptimizer::EAGER:
+      ASSERT(from_ == NULL);
+      return function->code();
+    case Deoptimizer::LAZY: {
+      Code* compiled_code =
+          isolate_->deoptimizer_data()->FindDeoptimizingCode(from_);
+      return (compiled_code == NULL)
+          ? static_cast<Code*>(isolate_->heap()->FindCodeObject(from_))
+          : compiled_code;
+    }
+    case Deoptimizer::OSR: {
+      // The function has already been optimized and we're transitioning
+      // from the unoptimized shared version to the optimized one in the
+      // function. The return address (from_) points to unoptimized code.
+      Code* compiled_code = function->code();
+      ASSERT(compiled_code->kind() == Code::OPTIMIZED_FUNCTION);
+      ASSERT(!compiled_code->contains(from_));
+      return compiled_code;
+    }
+    case Deoptimizer::DEBUGGER:
+      ASSERT(optimized_code->contains(from_));
+      return optimized_code;
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+void Deoptimizer::Trace() {
+  PrintF("**** %s: ", Deoptimizer::MessageFor(bailout_type_));
+  PrintFunctionName();
+  PrintF(" at id #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
+         bailout_id_,
+         reinterpret_cast<intptr_t>(from_),
+         fp_to_sp_delta_ - (2 * kPointerSize));
+  if (bailout_type_ == EAGER) compiled_code_->PrintDeoptLocation(bailout_id_);
+}
+
+
+void Deoptimizer::PrintFunctionName() {
+  if (function_->IsJSFunction()) {
+    function_->PrintName();
+  } else {
+    PrintF("%s", Code::Kind2String(compiled_code_->kind()));
+  }
+}
+
+
 Deoptimizer::~Deoptimizer() {
   ASSERT(input_ == NULL && output_ == NULL);
 }
@@ -566,14 +705,14 @@
     PrintF("[deoptimizing%s: begin 0x%08" V8PRIxPTR " ",
            (bailout_type_ == LAZY ? " (lazy)" : ""),
            reinterpret_cast<intptr_t>(function_));
-    function_->PrintName();
+    PrintFunctionName();
     PrintF(" @%d]\n", bailout_id_);
   }
 
   // Determine basic deoptimization information.  The optimized frame is
   // described by the input data.
   DeoptimizationInputData* input_data =
-      DeoptimizationInputData::cast(optimized_code_->deoptimization_data());
+      DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
   BailoutId node_id = input_data->AstId(bailout_id_);
   ByteArray* translations = input_data->TranslationByteArray();
   unsigned translation_index =
@@ -618,6 +757,9 @@
       case Translation::SETTER_STUB_FRAME:
         DoComputeAccessorStubFrame(&iterator, i, true);
         break;
+      case Translation::COMPILED_STUB_FRAME:
+        DoCompiledStubFrame(&iterator, i);
+        break;
       case Translation::BEGIN:
       case Translation::REGISTER:
       case Translation::INT32_REGISTER:
@@ -630,6 +772,7 @@
       case Translation::LITERAL:
       case Translation::ARGUMENTS_OBJECT:
       case Translation::DUPLICATE:
+      default:
         UNREACHABLE();
         break;
     }
@@ -642,7 +785,7 @@
     JSFunction* function = output_[index]->GetFunction();
     PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ",
            reinterpret_cast<intptr_t>(function));
-    function->PrintName();
+    if (function != NULL) function->PrintName();
     PrintF(" => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
            " took %0.3f ms]\n",
            node_id.ToInt(),
@@ -809,6 +952,7 @@
     case Translation::CONSTRUCT_STUB_FRAME:
     case Translation::GETTER_STUB_FRAME:
     case Translation::SETTER_STUB_FRAME:
+    case Translation::COMPILED_STUB_FRAME:
     case Translation::DUPLICATE:
       UNREACHABLE();
       return;
@@ -1117,6 +1261,7 @@
     case Translation::CONSTRUCT_STUB_FRAME:
     case Translation::GETTER_STUB_FRAME:
     case Translation::SETTER_STUB_FRAME:
+    case Translation::COMPILED_STUB_FRAME:
     case Translation::DUPLICATE:
       UNREACHABLE();  // Malformed input.
        return false;
@@ -1337,8 +1482,9 @@
     // environment at the OSR entry. The code for that is built into
     // the DoComputeOsrOutputFrame function for now.
   } else {
-    unsigned stack_slots = optimized_code_->stack_slots();
-    unsigned outgoing_size = ComputeOutgoingArgumentSize();
+    unsigned stack_slots = compiled_code_->stack_slots();
+    unsigned outgoing_size = compiled_code_->kind() == Code::COMPILED_STUB
+        ? 0 : ComputeOutgoingArgumentSize();
     ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
   }
 #endif
@@ -1357,6 +1503,10 @@
 unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
   // The incoming arguments is the values for formal parameters and
   // the receiver. Every slot contains a pointer.
+  if (function->IsSmi()) {
+    ASSERT(Smi::cast(function) == Smi::FromInt(StackFrame::STUB));
+    return 0;
+  }
   unsigned arguments = function->shared()->formal_parameter_count() + 1;
   return arguments * kPointerSize;
 }
@@ -1364,7 +1514,7 @@
 
 unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
   DeoptimizationInputData* data = DeoptimizationInputData::cast(
-      optimized_code_->deoptimization_data());
+      compiled_code_->deoptimization_data());
   unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
   return height * kPointerSize;
 }
@@ -1372,7 +1522,7 @@
 
 Object* Deoptimizer::ComputeLiteral(int index) const {
   DeoptimizationInputData* data = DeoptimizationInputData::cast(
-      optimized_code_->deoptimization_data());
+      compiled_code_->deoptimization_data());
   FixedArray* literals = data->LiteralArray();
   return literals->get(index);
 }
@@ -1403,23 +1553,21 @@
   // cause us to emit relocation information for the external
   // references. This is fine because the deoptimizer's code section
   // isn't meant to be serialized at all.
-  ASSERT(!Serializer::enabled());
-
   ASSERT(type == EAGER || type == LAZY);
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   int entry_count = (type == EAGER)
       ? data->eager_deoptimization_entry_code_entries_
       : data->lazy_deoptimization_entry_code_entries_;
   if (max_entry_id < entry_count) return;
-  entry_count = Min(Max(entry_count * 2, Deoptimizer::kMinNumberOfEntries),
-                    Deoptimizer::kMaxNumberOfEntries);
+  entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries);
+  while (max_entry_id >= entry_count) entry_count *= 2;
+  ASSERT(entry_count <= Deoptimizer::kMaxNumberOfEntries);
 
   MacroAssembler masm(Isolate::Current(), NULL, 16 * KB);
   masm.set_emit_debug_code(false);
   GenerateDeoptimizationEntries(&masm, entry_count, type);
   CodeDesc desc;
   masm.GetCode(&desc);
-  ASSERT(desc.reloc_size == 0);
 
   VirtualMemory* memory = type == EAGER
       ? data->eager_deoptimization_entry_code_
@@ -1438,82 +1586,11 @@
 }
 
 
-Code* Deoptimizer::FindDeoptimizingCodeFromAddress(Address addr) {
-  DeoptimizingCodeListNode* node =
-      Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
-  while (node != NULL) {
-    if (node->code()->contains(addr)) return *node->code();
-    node = node->next();
-  }
-  return NULL;
-}
-
-
-void Deoptimizer::RemoveDeoptimizingCode(Code* code) {
-  DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
-  ASSERT(data->deoptimizing_code_list_ != NULL);
-  // Run through the code objects to find this one and remove it.
-  DeoptimizingCodeListNode* prev = NULL;
-  DeoptimizingCodeListNode* current = data->deoptimizing_code_list_;
-  while (current != NULL) {
-    if (*current->code() == code) {
-      // Unlink from list. If prev is NULL we are looking at the first element.
-      if (prev == NULL) {
-        data->deoptimizing_code_list_ = current->next();
-      } else {
-        prev->set_next(current->next());
-      }
-      delete current;
-      return;
-    }
-    // Move to next in list.
-    prev = current;
-    current = current->next();
-  }
-  // Deoptimizing code is removed through weak callback. Each object is expected
-  // to be removed once and only once.
-  UNREACHABLE();
-}
-
-
-static Object* CutOutRelatedFunctionsList(Context* context,
-                                          Code* code,
-                                          Object* undefined) {
-  Object* result_list_head = undefined;
-  Object* head;
-  Object* current;
-  current = head = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
-  JSFunction* prev = NULL;
-  while (current != undefined) {
-    JSFunction* func = JSFunction::cast(current);
-    current = func->next_function_link();
-    if (func->code() == code) {
-      func->set_next_function_link(result_list_head);
-      result_list_head = func;
-      if (prev) {
-        prev->set_next_function_link(current);
-      } else {
-        head = current;
-      }
-    } else {
-      prev = func;
-    }
-  }
-  if (head != context->get(Context::OPTIMIZED_FUNCTIONS_LIST)) {
-    context->set(Context::OPTIMIZED_FUNCTIONS_LIST, head);
-  }
-  return result_list_head;
-}
-
-
 void Deoptimizer::ReplaceCodeForRelatedFunctions(JSFunction* function,
                                                  Code* code) {
-  Context* context = function->context()->native_context();
-
   SharedFunctionInfo* shared = function->shared();
-
   Object* undefined = Isolate::Current()->heap()->undefined_value();
-  Object* current = CutOutRelatedFunctionsList(context, code, undefined);
+  Object* current = function;
 
   while (current != undefined) {
     JSFunction* func = JSFunction::cast(current);
@@ -1681,6 +1758,11 @@
 }
 
 
+void Translation::BeginCompiledStubFrame() {
+  buffer_->Add(COMPILED_STUB_FRAME, zone());
+}
+
+
 void Translation::StoreRegister(Register reg) {
   buffer_->Add(REGISTER, zone());
   buffer_->Add(reg.code(), zone());
@@ -1762,6 +1844,7 @@
     case UINT32_STACK_SLOT:
     case DOUBLE_STACK_SLOT:
     case LITERAL:
+    case COMPILED_STUB_FRAME:
       return 1;
     case BEGIN:
     case ARGUMENTS_ADAPTOR_FRAME:
@@ -1792,6 +1875,8 @@
       return "GETTER_STUB_FRAME";
     case SETTER_STUB_FRAME:
       return "SETTER_STUB_FRAME";
+    case COMPILED_STUB_FRAME:
+      return "COMPILED_STUB_FRAME";
     case REGISTER:
       return "REGISTER";
     case INT32_REGISTER:
@@ -1899,6 +1984,10 @@
       int literal_index = iterator->Next();
       return SlotRef(data->LiteralArray()->get(literal_index));
     }
+
+    case Translation::COMPILED_STUB_FRAME:
+      UNREACHABLE();
+      break;
   }
 
   UNREACHABLE();
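
PartitionOptimizedFunctions above is intrusive: selected functions are unlinked from the context's list and rethreaded, via their own next-function links, onto per-code lists headed by each code object. A stand-alone analogue with plain structs in place of JSFunction and Code; it keeps the same bucketing shape but none of V8's heap rules:

#include <cassert>
#include <cstddef>
#include <vector>

struct Code;

// Intrusive list node, like JSFunction with next_function_link().
struct Function {
  Code* code;
  Function* next;
};

// Like Code with its deoptimizing_functions field.
struct Code {
  Function* deoptimizing_functions;
};

// Move every function matching `take` off `list`, chaining matches onto
// their code object's deoptimizing_functions list; collect each code object
// once. Returns the new head of the remainder list.
static Function* Partition(Function* list,
                           bool (*take)(Function*),
                           std::vector<Code*>* codes) {
  Function* remainder_head = NULL;
  Function* remainder_tail = NULL;
  while (list != NULL) {
    Function* f = list;
    list = f->next;
    if (take(f)) {
      if (f->code->deoptimizing_functions == NULL) codes->push_back(f->code);
      f->next = f->code->deoptimizing_functions;
      f->code->deoptimizing_functions = f;
    } else {
      if (remainder_head == NULL) remainder_head = f;
      else remainder_tail->next = f;
      remainder_tail = f;
    }
  }
  if (remainder_tail != NULL) remainder_tail->next = NULL;
  return remainder_head;
}

static bool TakeAll(Function*) { return true; }

int main() {
  Code c = { NULL };
  Function f2 = { &c, NULL };
  Function f1 = { &c, &f2 };
  std::vector<Code*> codes;
  Function* rest = Partition(&f1, &TakeAll, &codes);
  assert(rest == NULL);
  assert(codes.size() == 1);  // both functions share one code object
  assert(c.deoptimizing_functions == &f2 && f2.next == &f1);
  return 0;
}

Because a code object is collected only while its list is still empty, the output holds one entry per distinct code, which is what DeoptimizeFunction's ASSERT_EQ(1, codes.length()) relies on.
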
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 89955b3..870fdb2 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -87,6 +87,14 @@
 };
 
 
+class OptimizedFunctionFilter BASE_EMBEDDED {
+ public:
+  virtual ~OptimizedFunctionFilter() {}
+
+  virtual bool TakeFunction(JSFunction* function) = 0;
+};
+
+
 class Deoptimizer;
 
 
@@ -99,6 +107,9 @@
   void Iterate(ObjectVisitor* v);
 #endif
 
+  Code* FindDeoptimizingCode(Address addr);
+  void RemoveDeoptimizingCode(Code* code);
+
  private:
   int eager_deoptimization_entry_code_entries_;
   int lazy_deoptimization_entry_code_entries_;
@@ -133,8 +144,13 @@
     DEBUGGER
   };
 
+  static bool TraceEnabledFor(BailoutType type);
+  static const char* MessageFor(BailoutType type);
+
   int output_count() const { return output_count_; }
 
+  Code::Kind compiled_code_kind() const { return compiled_code_->kind(); }
+
   // Number of created JS frames. Not all created frames are necessarily JS.
   int jsframe_count() const { return jsframe_count_; }
 
@@ -177,12 +193,14 @@
 
   static void DeoptimizeGlobalObject(JSObject* object);
 
+  static void DeoptimizeAllFunctionsWith(OptimizedFunctionFilter* filter);
+
+  static void DeoptimizeAllFunctionsForContext(
+      Context* context, OptimizedFunctionFilter* filter);
+
   static void VisitAllOptimizedFunctionsForContext(
       Context* context, OptimizedFunctionVisitor* visitor);
 
-  static void VisitAllOptimizedFunctionsForGlobalObject(
-      JSObject* object, OptimizedFunctionVisitor* visitor);
-
   static void VisitAllOptimizedFunctions(OptimizedFunctionVisitor* visitor);
 
   // The size in bytes of the code required at a lazy deopt patch site.
@@ -297,6 +315,9 @@
 
   static size_t GetMaxDeoptTableSize();
 
+  static void EnsureCodeForDeoptimizationEntry(BailoutType type,
+                                               int max_entry_id);
+
  private:
   static const int kMinNumberOfEntries = 64;
   static const int kMaxNumberOfEntries = 16384;
@@ -308,6 +329,9 @@
               Address from,
               int fp_to_sp_delta,
               Code* optimized_code);
+  Code* FindOptimizedCode(JSFunction* function, Code* optimized_code);
+  void Trace();
+  void PrintFunctionName();
   void DeleteFrameDescriptions();
 
   void DoComputeOutputFrames();
@@ -320,6 +344,8 @@
   void DoComputeAccessorStubFrame(TranslationIterator* iterator,
                                   int frame_index,
                                   bool is_setter_stub_frame);
+  void DoCompiledStubFrame(TranslationIterator* iterator,
+                           int frame_index);
   void DoTranslateCommand(TranslationIterator* iterator,
                           int frame_index,
                           unsigned output_offset);
@@ -342,16 +368,16 @@
   void AddArgumentsObjectValue(intptr_t value);
   void AddDoubleValue(intptr_t slot_address, double value);
 
-  static void EnsureCodeForDeoptimizationEntry(BailoutType type,
-                                               int max_entry_id);
   static void GenerateDeoptimizationEntries(
       MacroAssembler* masm, int count, BailoutType type);
 
   // Weak handle callback for deoptimizing code objects.
   static void HandleWeakDeoptimizedCode(
       v8::Persistent<v8::Value> obj, void* data);
-  static Code* FindDeoptimizingCodeFromAddress(Address addr);
-  static void RemoveDeoptimizingCode(Code* code);
+
+  // Deoptimize function assuming that function->next_function_link() points
+  // to a list that contains all functions that share the same optimized code.
+  static void DeoptimizeFunctionWithPreparedFunctionList(JSFunction* function);
 
   // Fill the input from a JavaScript frame. This is used when
   // the debugger needs to inspect an optimized frame. For normal
@@ -360,7 +386,7 @@
 
   Isolate* isolate_;
   JSFunction* function_;
-  Code* optimized_code_;
+  Code* compiled_code_;
   unsigned bailout_id_;
   BailoutType bailout_type_;
   Address from_;
@@ -530,7 +556,7 @@
   uintptr_t frame_size_;  // Number of bytes.
   JSFunction* function_;
   intptr_t registers_[Register::kNumRegisters];
-  double double_registers_[DoubleRegister::kNumAllocatableRegisters];
+  double double_registers_[DoubleRegister::kMaxNumAllocatableRegisters];
   intptr_t top_;
   intptr_t pc_;
   intptr_t fp_;
@@ -600,6 +626,7 @@
     GETTER_STUB_FRAME,
     SETTER_STUB_FRAME,
     ARGUMENTS_ADAPTOR_FRAME,
+    COMPILED_STUB_FRAME,
     REGISTER,
     INT32_REGISTER,
     UINT32_REGISTER,
@@ -630,6 +657,7 @@
 
   // Commands.
   void BeginJSFrame(BailoutId node_id, int literal_id, unsigned height);
+  void BeginCompiledStubFrame();
   void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
   void BeginConstructStubFrame(int literal_id, unsigned height);
   void BeginGetterStubFrame(int literal_id);
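
TraceEnabledFor and MessageFor, declared above, switch over BailoutType with no default case, so a newly added enumerator triggers a compiler warning instead of silently falling through; the trailing UNREACHABLE() only guards corrupted values. A minimal sketch of that convention, with abort() standing in for UNREACHABLE():

#include <cassert>
#include <cstdlib>

enum BailoutType { EAGER, LAZY, OSR, DEBUGGER };

// No default case: with warnings enabled, a missing enumerator is caught
// at compile time rather than at runtime.
static const char* MessageFor(BailoutType type) {
  switch (type) {
    case EAGER:
    case LAZY:
      return "DEOPT";
    case DEBUGGER:
      return "DEOPT FOR DEBUGGER";
    case OSR:
      return "OSR";
  }
  abort();  // only reachable on a corrupt enum value
  return NULL;
}

int main() {
  assert(MessageFor(DEBUGGER)[0] == 'D');
  return 0;
}
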
diff --git a/src/disassembler.cc b/src/disassembler.cc
index 9f8b9a8..05d6b9b 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -287,7 +287,12 @@
         Address addr = relocinfo.target_address();
         int id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::EAGER);
         if (id == Deoptimizer::kNotDeoptimizationEntry) {
-          out.AddFormatted("    ;; %s", RelocInfo::RelocModeName(rmode));
+          id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::LAZY);
+          if (id == Deoptimizer::kNotDeoptimizationEntry) {
+            out.AddFormatted("    ;; %s", RelocInfo::RelocModeName(rmode));
+          } else {
+            out.AddFormatted("    ;; lazy deoptimization bailout %d", id);
+          }
         } else {
           out.AddFormatted("    ;; deoptimization bailout %d", id);
         }
@@ -322,7 +327,8 @@
 
 // Called by Code::CodePrint.
 void Disassembler::Decode(FILE* f, Code* code) {
-  int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION)
+  int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION ||
+                     code->kind() == Code::COMPILED_STUB)
       ? static_cast<int>(code->safepoint_table_offset())
       : code->instruction_size();
   // If there might be a stack check table, stop before reaching it.
diff --git a/src/factory.cc b/src/factory.cc
index 556f2b0..8adae4e 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -158,9 +158,9 @@
 
 
 // Symbols are created in the old generation (data space).
-Handle<String> Factory::LookupSymbol(Vector<const char> string) {
+Handle<String> Factory::LookupUtf8Symbol(Vector<const char> string) {
   CALL_HEAP_FUNCTION(isolate(),
-                     isolate()->heap()->LookupSymbol(string),
+                     isolate()->heap()->LookupUtf8Symbol(string),
                      String);
 }
 
@@ -171,18 +171,18 @@
                      String);
 }
 
-Handle<String> Factory::LookupAsciiSymbol(Vector<const char> string) {
+Handle<String> Factory::LookupOneByteSymbol(Vector<const char> string) {
   CALL_HEAP_FUNCTION(isolate(),
-                     isolate()->heap()->LookupAsciiSymbol(string),
+                     isolate()->heap()->LookupOneByteSymbol(string),
                      String);
 }
 
 
-Handle<String> Factory::LookupAsciiSymbol(Handle<SeqOneByteString> string,
+Handle<String> Factory::LookupOneByteSymbol(Handle<SeqOneByteString> string,
                                           int from,
                                           int length) {
   CALL_HEAP_FUNCTION(isolate(),
-                     isolate()->heap()->LookupAsciiSymbol(string,
+                     isolate()->heap()->LookupOneByteSymbol(string,
                                                           from,
                                                           length),
                      String);
@@ -741,7 +741,7 @@
 Handle<Object> Factory::NewError(const char* maker,
                                  const char* type,
                                  Handle<JSArray> args) {
-  Handle<String> make_str = LookupAsciiSymbol(maker);
+  Handle<String> make_str = LookupUtf8Symbol(maker);
   Handle<Object> fun_obj(
       isolate()->js_builtins_object()->GetPropertyNoExceptionThrown(*make_str));
   // If the builtins haven't been properly configured yet this error
@@ -750,7 +750,7 @@
     return EmergencyNewError(type, args);
   }
   Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
-  Handle<Object> type_obj = LookupAsciiSymbol(type);
+  Handle<Object> type_obj = LookupUtf8Symbol(type);
   Handle<Object> argv[] = { type_obj, args };
 
   // Invoke the JavaScript factory method. If an exception is thrown while
@@ -772,7 +772,7 @@
 
 Handle<Object> Factory::NewError(const char* constructor,
                                  Handle<String> message) {
-  Handle<String> constr = LookupAsciiSymbol(constructor);
+  Handle<String> constr = LookupUtf8Symbol(constructor);
   Handle<JSFunction> fun = Handle<JSFunction>(
       JSFunction::cast(isolate()->js_builtins_object()->
                        GetPropertyNoExceptionThrown(*constr)));
@@ -1260,6 +1260,10 @@
                   instance_size,
                   code,
                   true);
+
+  // Set length.
+  result->shared()->set_length(obj->length());
+
   // Set class name.
   Handle<Object> class_name = Handle<Object>(obj->class_name());
   if (class_name->IsString()) {
diff --git a/src/factory.h b/src/factory.h
index dd613b7..5493335 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -79,16 +79,16 @@
 
   Handle<TypeFeedbackInfo> NewTypeFeedbackInfo();
 
-  Handle<String> LookupSymbol(Vector<const char> str);
+  Handle<String> LookupUtf8Symbol(Vector<const char> str);
+  Handle<String> LookupUtf8Symbol(const char* str) {
+    return LookupUtf8Symbol(CStrVector(str));
+  }
   Handle<String> LookupSymbol(Handle<String> str);
-  Handle<String> LookupAsciiSymbol(Vector<const char> str);
-  Handle<String> LookupAsciiSymbol(Handle<SeqOneByteString>,
+  Handle<String> LookupOneByteSymbol(Vector<const char> str);
+  Handle<String> LookupOneByteSymbol(Handle<SeqOneByteString>,
                                    int from,
                                    int length);
   Handle<String> LookupTwoByteSymbol(Vector<const uc16> str);
-  Handle<String> LookupAsciiSymbol(const char* str) {
-    return LookupSymbol(CStrVector(str));
-  }
 
 
   // String creation functions.  Most of the string creation functions take
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 27a526c..a514632 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -235,6 +235,11 @@
 }
 
 
+inline StubFrame::StubFrame(StackFrameIterator* iterator)
+    : StandardFrame(iterator) {
+}
+
+
 inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
     : JavaScriptFrame(iterator) {
 }
diff --git a/src/frames.cc b/src/frames.cc
index 3b60fb5..23bceda 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -617,13 +617,7 @@
 }
 
 
-void OptimizedFrame::Iterate(ObjectVisitor* v) const {
-#ifdef DEBUG
-  // Make sure that optimized frames do not contain any stack handlers.
-  StackHandlerIterator it(this, top_handler());
-  ASSERT(it.done());
-#endif
-
+void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
   // Make sure that we're not doing "safe" stack frame iteration. We cannot
   // possibly find pointers in optimized frames in that state.
   ASSERT(!SafeStackFrameIterator::is_active(isolate()));
@@ -649,7 +643,7 @@
 
   // Skip saved double registers.
   if (safepoint_entry.has_doubles()) {
-    parameters_base += DoubleRegister::kNumAllocatableRegisters *
+    parameters_base += DoubleRegister::NumAllocatableRegisters() *
         kDoubleSize / kPointerSize;
   }
 
@@ -681,14 +675,50 @@
     }
   }
 
+  // Visit the return address in the callee and incoming arguments.
+  IteratePc(v, pc_address(), code);
+}
+
+
+void StubFrame::Iterate(ObjectVisitor* v) const {
+  IterateCompiledFrame(v);
+}
+
+
+Code* StubFrame::unchecked_code() const {
+  return static_cast<Code*>(isolate()->heap()->FindCodeObject(pc()));
+}
+
+
+Address StubFrame::GetCallerStackPointer() const {
+  return fp() + ExitFrameConstants::kCallerSPDisplacement;
+}
+
+
+int StubFrame::GetNumberOfIncomingArguments() const {
+  return 0;
+}
+
+
+void OptimizedFrame::Iterate(ObjectVisitor* v) const {
+#ifdef DEBUG
+  // Make sure that optimized frames do not contain any stack handlers.
+  StackHandlerIterator it(this, top_handler());
+  ASSERT(it.done());
+#endif
+
+  IterateCompiledFrame(v);
+
   // Visit the context and the function.
   Object** fixed_base = &Memory::Object_at(
       fp() + JavaScriptFrameConstants::kFunctionOffset);
   Object** fixed_limit = &Memory::Object_at(fp());
   v->VisitPointers(fixed_base, fixed_limit);
+}
 
-  // Visit the return address in the callee and incoming arguments.
-  IteratePc(v, pc_address(), code);
+
+void JavaScriptFrame::SetParameterValue(int index, Object* value) const {
+  Memory::Object_at(GetParameterSlot(index)) = value;
 }
 
 
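The frames.cc change hoists the safepoint-driven pointer walk into StandardFrame::IterateCompiledFrame, so the new StubFrame reuses it unchanged and OptimizedFrame layers the function/context visit on top. A stand-alone sketch of that sharing, with a toy visitor in place of the real frame layout:

#include <cassert>

class Visitor {
 public:
  virtual ~Visitor() {}
  virtual void VisitPointers(int count) = 0;
};

class StandardFrame {
 public:
  virtual ~StandardFrame() {}
  virtual void Iterate(Visitor* v) const = 0;

 protected:
  // Shared compiled-frame walk: spill slots, saved registers, return pc.
  void IterateCompiledFrame(Visitor* v) const { v->VisitPointers(4); }
};

class StubFrame : public StandardFrame {
 public:
  virtual void Iterate(Visitor* v) const { IterateCompiledFrame(v); }
};

class OptimizedFrame : public StandardFrame {
 public:
  virtual void Iterate(Visitor* v) const {
    IterateCompiledFrame(v);
    v->VisitPointers(2);  // additionally the function and the context
  }
};

class CountingVisitor : public Visitor {
 public:
  CountingVisitor() : total_(0) {}
  virtual void VisitPointers(int count) { total_ += count; }
  int total_;
};

int main() {
  CountingVisitor counter;
  StubFrame stub;
  OptimizedFrame optimized;
  stub.Iterate(&counter);       // 4 pointers
  optimized.Iterate(&counter);  // 4 + 2 pointers
  assert(counter.total_ == 10);
  return 0;
}
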
diff --git a/src/frames.h b/src/frames.h
index 30f7e1f..a1d24ac 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -136,6 +136,7 @@
   V(EXIT,              ExitFrame)             \
   V(JAVA_SCRIPT,       JavaScriptFrame)       \
   V(OPTIMIZED,         OptimizedFrame)        \
+  V(STUB,              StubFrame)             \
   V(INTERNAL,          InternalFrame)         \
   V(CONSTRUCT,         ConstructFrame)        \
   V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
@@ -263,12 +264,12 @@
                      PrintMode mode,
                      int index) const { }
 
+  Isolate* isolate() const { return isolate_; }
+
  protected:
   inline explicit StackFrame(StackFrameIterator* iterator);
   virtual ~StackFrame() { }
 
-  Isolate* isolate() const { return isolate_; }
-
   // Compute the stack pointer for the calling frame.
   virtual Address GetCallerStackPointer() const = 0;
 
@@ -448,6 +449,9 @@
   // construct frame.
   static inline bool IsConstructFrame(Address fp);
 
+  // Used by OptimizedFrames and StubFrames.
+  void IterateCompiledFrame(ObjectVisitor* v) const;
+
  private:
   friend class StackFrame;
   friend class StackFrameIterator;
@@ -500,6 +504,9 @@
     return GetNumberOfIncomingArguments();
   }
 
+  // Debugger access.
+  void SetParameterValue(int index, Object* value) const;
+
   // Check if this frame is a constructor frame invoked through 'new'.
   bool IsConstructor() const;
 
@@ -555,6 +562,27 @@
 };
 
 
+class StubFrame : public StandardFrame {
+ public:
+  virtual Type type() const { return STUB; }
+
+  // GC support.
+  virtual void Iterate(ObjectVisitor* v) const;
+
+  // Determine the code for the frame.
+  virtual Code* unchecked_code() const;
+
+ protected:
+  inline explicit StubFrame(StackFrameIterator* iterator);
+
+  virtual Address GetCallerStackPointer() const;
+
+  virtual int GetNumberOfIncomingArguments() const;
+
+  friend class StackFrameIterator;
+};
+
+
 class OptimizedFrame : public JavaScriptFrame {
  public:
   virtual Type type() const { return OPTIMIZED; }
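
Adding V(STUB, StubFrame) to STACK_FRAME_TYPE_LIST works because the list is an X-macro: every expansion site (the type enum, iterator dispatch, and so on) picks up the new entry from one definition. A minimal sketch of the pattern with a single-argument list; the real macro passes both the enum name and the class:

#include <cstdio>

// One list macro drives every expansion; adding STUB here updates the enum
// and the name table together.
#define FRAME_TYPE_LIST(V) \
  V(JAVA_SCRIPT)           \
  V(OPTIMIZED)             \
  V(STUB)

enum FrameType {
#define DECLARE_TYPE(type) type,
  FRAME_TYPE_LIST(DECLARE_TYPE)
#undef DECLARE_TYPE
  NUMBER_OF_TYPES
};

static const char* const kFrameTypeNames[] = {
#define DECLARE_NAME(type) #type,
  FRAME_TYPE_LIST(DECLARE_NAME)
#undef DECLARE_NAME
};

int main() {
  printf("%d frame types; last is %s\n",
         static_cast<int>(NUMBER_OF_TYPES), kFrameTypeNames[STUB]);
  return 0;
}
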
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 928da4a..faf111f 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -398,6 +398,7 @@
                          !Snapshot::HaveASnapshotToStartFrom();
   masm_->set_emit_debug_code(generate_debug_code_);
   masm_->set_predictable_code_size(true);
+  InitializeAstVisitor();
 }
 
 
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 2f88184..c25076a 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -48,7 +48,9 @@
 // debugger to piggyback on.
 class BreakableStatementChecker: public AstVisitor {
  public:
-  BreakableStatementChecker() : is_breakable_(false) {}
+  BreakableStatementChecker() : is_breakable_(false) {
+    InitializeAstVisitor();
+  }
 
   void Check(Statement* stmt);
   void Check(Expression* stmt);
@@ -63,6 +65,7 @@
 
   bool is_breakable_;
 
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
   DISALLOW_COPY_AND_ASSIGN(BreakableStatementChecker);
 };
 
@@ -825,6 +828,7 @@
 
   friend class NestedStatement;
 
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
   DISALLOW_COPY_AND_ASSIGN(FullCodeGenerator);
 };
 
diff --git a/src/handles.cc b/src/handles.cc
index 7397cc0..855997e 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -282,7 +282,7 @@
 Handle<Object> GetProperty(Handle<JSReceiver> obj,
                            const char* name) {
   Isolate* isolate = obj->GetIsolate();
-  Handle<String> str = isolate->factory()->LookupAsciiSymbol(name);
+  Handle<String> str = isolate->factory()->LookupUtf8Symbol(name);
   CALL_HEAP_FUNCTION(isolate, obj->GetProperty(*str), Object);
 }
 
@@ -375,6 +375,15 @@
   Handle<JSFunction> constructor = isolate->script_function();
   Handle<JSValue> result =
       Handle<JSValue>::cast(isolate->factory()->NewJSObject(constructor));
+
+  // The allocation might have triggered a GC, which could have called this
+  // function recursively; in that case a wrapper has already been created
+  // and cached, so simply return the cached wrapper.
+  if (script->wrapper()->foreign_address() != NULL) {
+    return Handle<JSValue>(
+        reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
+  }
+
   result->set_value(*script);
 
   // Create a new weak global handle and use it to cache the wrapper
@@ -596,7 +605,8 @@
 Handle<Object> GetScriptNameOrSourceURL(Handle<Script> script) {
   Isolate* isolate = script->GetIsolate();
   Handle<String> name_or_source_url_key =
-      isolate->factory()->LookupAsciiSymbol("nameOrSourceURL");
+      isolate->factory()->LookupOneByteSymbol(
+          STATIC_ASCII_VECTOR("nameOrSourceURL"));
   Handle<JSValue> script_wrapper = GetScriptWrapper(script);
   Handle<Object> property = GetProperty(script_wrapper,
                                         name_or_source_url_key);
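
The GetScriptWrapper fix above handles allocation re-entrancy: NewJSObject can trigger a GC whose callbacks may call GetScriptWrapper recursively and cache a wrapper first, so the cache is consulted again after allocating. A stand-alone sketch of that check-again-after-allocation pattern; the callback models re-entrant GC work:

#include <cassert>
#include <cstddef>

static int* cached_wrapper = NULL;

// Stands in for an allocation that can trigger a GC; the callback models
// GC work that may call GetWrapper() recursively and populate the cache.
static int* Allocate(void (*gc_callback)()) {
  static int storage[8];
  static int next = 0;
  if (gc_callback != NULL) gc_callback();
  return &storage[next++];
}

static int* GetWrapper(void (*gc_callback)()) {
  if (cached_wrapper != NULL) return cached_wrapper;  // fast path
  int* fresh = Allocate(gc_callback);
  // The allocation may have re-entered and cached a wrapper already; if so,
  // return the cached one and drop the fresh object.
  if (cached_wrapper != NULL) return cached_wrapper;
  cached_wrapper = fresh;
  return fresh;
}

static void ReentrantGc() { GetWrapper(NULL); }

int main() {
  int* w = GetWrapper(&ReentrantGc);
  // The wrapper cached by the re-entrant call wins; no second cache entry.
  assert(w == cached_wrapper);
  return 0;
}
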
diff --git a/src/heap-inl.h b/src/heap-inl.h
index de47c94..6d2e958 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -98,12 +98,34 @@
 }
 
 
+template<>
+inline bool Heap::IsOneByte(Vector<const char> str, int chars) {
+  // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
+  // ASCII only check.
+  return chars == str.length();
+}
+
+
+template<>
+inline bool Heap::IsOneByte(String* str, int chars) {
+  return str->IsOneByteRepresentation();
+}
+
+
 MaybeObject* Heap::AllocateSymbol(Vector<const char> str,
                                   int chars,
                                   uint32_t hash_field) {
-  unibrow::Utf8InputBuffer<> buffer(str.start(),
-                                    static_cast<unsigned>(str.length()));
-  return AllocateInternalSymbol(&buffer, chars, hash_field);
+  if (IsOneByte(str, chars)) return AllocateAsciiSymbol(str, hash_field);
+  return AllocateInternalSymbol<false>(str, chars, hash_field);
+}
+
+
+template<typename T>
+MaybeObject* Heap::AllocateInternalSymbol(T t, int chars, uint32_t hash_field) {
+  if (IsOneByte(t, chars)) {
+    return AllocateInternalSymbol<true>(t, chars, hash_field);
+  }
+  return AllocateInternalSymbol<false>(t, chars, hash_field);
 }
 
 
@@ -644,6 +666,19 @@
 }
 
 
+void ErrorObjectList::Add(JSObject* object) {
+  list_.Add(object);
+}
+
+
+void ErrorObjectList::Iterate(ObjectVisitor* v) {
+  if (!list_.is_empty()) {
+    Object** start = &list_[0];
+    v->VisitPointers(start, start + list_.length());
+  }
+}
+
+
 void Heap::ClearInstanceofCache() {
   set_instanceof_cache_function(the_hole_value());
 }
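
AllocateInternalSymbol above turns a runtime one-byte/two-byte decision into a compile-time template parameter: the dispatching overload tests IsOneByte once, then calls an instantiation in which the branch is a constant. A minimal sketch of that runtime-to-compile-time dispatch; EncodedSize is a hypothetical stand-in for the two write paths:

#include <cassert>
#include <cstddef>
#include <cstring>

// The bool is a template parameter, so within each instantiation the branch
// below is a compile-time constant and the dead arm is eliminated.
template <bool is_one_byte>
static size_t EncodedSize(size_t chars) {
  if (is_one_byte) {
    return chars;      // one byte per character
  } else {
    return chars * 2;  // two bytes per character
  }
}

// Runtime dispatch: decide once, then jump to the specialized instance,
// mirroring the shape of the IsOneByte check in the patch.
static size_t EncodedSizeFor(const char* str, size_t chars) {
  bool one_byte = (strlen(str) == chars);  // ASCII-only heuristic
  return one_byte ? EncodedSize<true>(chars) : EncodedSize<false>(chars);
}

int main() {
  assert(EncodedSizeFor("abc", 3) == 3);        // ASCII: one-byte path
  assert(EncodedSizeFor("a\xC3\xA9", 2) == 4);  // "aé": two-byte path
  return 0;
}
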
diff --git a/src/heap.cc b/src/heap.cc
index 746c9b6..ec98ca5 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -550,6 +550,8 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
   isolate_->debug()->AfterGarbageCollection();
 #endif  // ENABLE_DEBUGGER_SUPPORT
+
+  error_object_list_.DeferredFormatStackTrace(isolate());
 }
 
 
@@ -1354,6 +1356,8 @@
   UpdateNewSpaceReferencesInExternalStringTable(
       &UpdateNewSpaceReferenceInExternalStringTableEntry);
 
+  error_object_list_.UpdateReferencesInNewSpace(this);
+
   promotion_queue_.Destroy();
 
   LiveObjectList::UpdateReferencesForScavengeGC();
@@ -2772,7 +2776,7 @@
 
   for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
     { MaybeObject* maybe_obj =
-          LookupAsciiSymbol(constant_symbol_table[i].contents);
+          LookupUtf8Symbol(constant_symbol_table[i].contents);
       if (!maybe_obj->ToObject(&obj)) return false;
     }
     roots_[constant_symbol_table[i].index] = String::cast(obj);
@@ -3298,8 +3302,8 @@
 
 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
     Heap* heap,
-    uint32_t c1,
-    uint32_t c2) {
+    uint16_t c1,
+    uint16_t c2) {
   String* symbol;
   // Numeric strings have a different hash algorithm not known by
   // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
@@ -3308,15 +3312,16 @@
     return symbol;
   // Now we know the length is 2, we might as well make use of that fact
   // when building the new string.
-  } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) {  // We can do this
+  } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxAsciiCharCodeU) {
+    // We can do this.
     ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1));  // because of this.
     Object* result;
     { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
       if (!maybe_result->ToObject(&result)) return maybe_result;
     }
     char* dest = SeqOneByteString::cast(result)->GetChars();
-    dest[0] = c1;
-    dest[1] = c2;
+    dest[0] = static_cast<char>(c1);
+    dest[1] = static_cast<char>(c2);
     return result;
   } else {
     Object* result;
@@ -3348,8 +3353,8 @@
   // dictionary.  Check whether we already have the string in the symbol
   // table to prevent creation of many unnecessary strings.
   if (length == 2) {
-    unsigned c1 = first->Get(0);
-    unsigned c2 = second->Get(0);
+    uint16_t c1 = first->Get(0);
+    uint16_t c2 = second->Get(0);
     return MakeOrFindTwoCharacterString(this, c1, c2);
   }
 
@@ -3463,8 +3468,8 @@
     // Optimization for 2-byte strings often used as keys in a decompression
     // dictionary.  Check whether we already have the string in the symbol
     // table to prevent creation of many unnecessary strings.
-    unsigned c1 = buffer->Get(start);
-    unsigned c2 = buffer->Get(start + 1);
+    uint16_t c1 = buffer->Get(start);
+    uint16_t c2 = buffer->Get(start + 1);
     return MakeOrFindTwoCharacterString(this, c1, c2);
   }
 
@@ -3606,7 +3611,8 @@
     char buffer[1];
     buffer[0] = static_cast<char>(code);
     Object* result;
-    MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
+    MaybeObject* maybe_result =
+        LookupOneByteSymbol(Vector<const char>(buffer, 1));
 
     if (!maybe_result->ToObject(&result)) return maybe_result;
     single_character_string_cache()->set(code, result);
@@ -4457,7 +4463,7 @@
   SharedFunctionInfo* shared = NULL;
   if (type == JS_FUNCTION_TYPE) {
     String* name;
-    maybe = LookupAsciiSymbol("<freezing call trap>");
+    maybe = LookupOneByteSymbol(STATIC_ASCII_VECTOR("<freezing call trap>"));
     if (!maybe->To<String>(&name)) return maybe;
     maybe = AllocateSharedFunctionInfo(name);
     if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
@@ -4540,37 +4546,31 @@
                                               PretenureFlag pretenure) {
   // Continue counting the number of characters in the UTF-8 string, starting
   // from the first non-ascii character or word.
-  int chars = non_ascii_start;
   Access<UnicodeCache::Utf8Decoder>
       decoder(isolate_->unicode_cache()->utf8_decoder());
-  decoder->Reset(string.start() + non_ascii_start, string.length() - chars);
-  while (decoder->has_more()) {
-    uint32_t r = decoder->GetNext();
-    if (r <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
-      chars++;
-    } else {
-      chars += 2;
-    }
-  }
-
+  decoder->Reset(string.start() + non_ascii_start,
+                 string.length() - non_ascii_start);
+  int utf16_length = decoder->Utf16Length();
+  ASSERT(utf16_length > 0);
+  // Allocate string.
   Object* result;
-  { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
+  {
+    int chars = non_ascii_start + utf16_length;
+    MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-
   // Convert and copy the characters into the new object.
   SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
-  decoder->Reset(string.start(), string.length());
-  int i = 0;
-  while (i < chars) {
-    uint32_t r = decoder->GetNext();
-    if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
-      twobyte->SeqTwoByteStringSet(i++, unibrow::Utf16::LeadSurrogate(r));
-      twobyte->SeqTwoByteStringSet(i++, unibrow::Utf16::TrailSurrogate(r));
-    } else {
-      twobyte->SeqTwoByteStringSet(i++, r);
+  // Copy ascii portion.
+  uint16_t* data = twobyte->GetChars();
+  if (non_ascii_start != 0) {
+    const char* ascii_data = string.start();
+    for (int i = 0; i < non_ascii_start; i++) {
+      *data++ = *ascii_data++;
     }
   }
+  // Now write the remainder.
+  decoder->WriteUtf16(data, utf16_length);
   return result;
 }
 
@@ -4619,27 +4619,88 @@
 }
 
 
-MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
+template<typename T>
+class AllocateInternalSymbolHelper {
+ public:
+  static void WriteOneByteData(T t, char* chars, int len);
+  static void WriteTwoByteData(T t, uint16_t* chars, int len);
+ private:
+  DISALLOW_COPY_AND_ASSIGN(AllocateInternalSymbolHelper);
+};
+
+
+template<>
+class AllocateInternalSymbolHelper< Vector<const char> > {
+ public:
+  static inline void WriteOneByteData(Vector<const char> vector,
+                                      char* chars,
+                                      int len) {
+    // Only works for ascii.
+    ASSERT(vector.length() == len);
+    memcpy(chars, vector.start(), len);
+  }
+
+  static inline void WriteTwoByteData(Vector<const char> vector,
+                                      uint16_t* chars,
+                                      int len) {
+    const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
+    unsigned stream_length = vector.length();
+    while (stream_length != 0) {
+      unsigned consumed = 0;
+      uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
+      ASSERT(c != unibrow::Utf8::kBadChar);
+      ASSERT(consumed <= stream_length);
+      stream_length -= consumed;
+      stream += consumed;
+      if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+        len -= 2;
+        if (len < 0) break;
+        *chars++ = unibrow::Utf16::LeadSurrogate(c);
+        *chars++ = unibrow::Utf16::TrailSurrogate(c);
+      } else {
+        len -= 1;
+        if (len < 0) break;
+        *chars++ = c;
+      }
+    }
+    ASSERT(stream_length == 0);
+    ASSERT(len == 0);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(AllocateInternalSymbolHelper);
+};
+
+
+template<>
+class AllocateInternalSymbolHelper<String*> {
+ public:
+  static inline void WriteOneByteData(String* s, char* chars, int len) {
+    ASSERT(s->length() == len);
+    String::WriteToFlat(s, chars, 0, len);
+  }
+
+  static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
+    ASSERT(s->length() == len);
+    String::WriteToFlat(s, chars, 0, len);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(AllocateInternalSymbolHelper<String*>);
+};
+
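
The helper class above exists because C++ permits full specialization of a class template but not of a function template in this shape; wrapping the two writers in a class template and fully specializing the class per source type is the standard workaround. A minimal sketch of the pattern (hypothetical names, not V8 API):

#include <cstdio>

// Primary template: only the specializations below define it.
template <typename T>
struct WriteHelper;

template <>
struct WriteHelper<const char*> {
  static void Write(const char* src) { std::printf("chars: %s\n", src); }
};

template <>
struct WriteHelper<int> {
  static void Write(int src) { std::printf("int: %d\n", src); }
};

// One function template dispatches on T, as AllocateInternalSymbol does.
template <typename T>
void WriteDispatch(T value) { WriteHelper<T>::Write(value); }

int main() {
  WriteDispatch("abc");  // deduces T = const char*
  WriteDispatch(7);      // deduces T = int
  return 0;
}
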
+
+template<bool is_one_byte, typename T>
+MaybeObject* Heap::AllocateInternalSymbol(T t,
                                           int chars,
                                           uint32_t hash_field) {
+  typedef AllocateInternalSymbolHelper<T> H;
   ASSERT(chars >= 0);
-  // Ensure the chars matches the number of characters in the buffer.
-  ASSERT(static_cast<unsigned>(chars) == buffer->Utf16Length());
-  // Determine whether the string is ASCII.
-  bool is_ascii = true;
-  while (buffer->has_more()) {
-    if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
-      is_ascii = false;
-      break;
-    }
-  }
-  buffer->Rewind();
-
   // Compute map and object size.
   int size;
   Map* map;
 
-  if (is_ascii) {
+  if (is_one_byte) {
     if (chars > SeqOneByteString::kMaxLength) {
       return Failure::OutOfMemoryException();
     }
@@ -4669,21 +4730,26 @@
 
   ASSERT_EQ(size, answer->Size());
 
-  // Fill in the characters.
-  int i = 0;
-  while (i < chars) {
-    uint32_t character = buffer->GetNext();
-    if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
-      answer->Set(i++, unibrow::Utf16::LeadSurrogate(character));
-      answer->Set(i++, unibrow::Utf16::TrailSurrogate(character));
-    } else {
-      answer->Set(i++, character);
-    }
+  if (is_one_byte) {
+    H::WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
+  } else {
+    H::WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
   }
   return answer;
 }
 
 
+// Need explicit instantiations.
+template
+MaybeObject* Heap::AllocateInternalSymbol<true>(String*, int, uint32_t);
+template
+MaybeObject* Heap::AllocateInternalSymbol<false>(String*, int, uint32_t);
+template
+MaybeObject* Heap::AllocateInternalSymbol<false>(Vector<const char>,
+                                                 int,
+                                                 uint32_t);
+
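
The "Need explicit instantiations" comment marks a classic C++ constraint: the template body is defined here in heap.cc while other translation units only see the declaration in heap.h, so every (is_one_byte, T) combination that callers use must be instantiated explicitly or the link fails. A minimal sketch of the idiom (hypothetical file names):

// lib.h -- declaration only; the definition is out of line.
template <typename T> T Twice(T t);

// lib.cc -- definition plus explicit instantiations for every T that
// callers may use; any other T fails at link time, not compile time.
template <typename T> T Twice(T t) { return t + t; }
template int Twice<int>(int);
template double Twice<double>(double);

// main.cc -- sees only lib.h, links against the instantiation above.
// int main() { return Twice(21); }
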
+
 MaybeObject* Heap::AllocateRawOneByteString(int length,
                                             PretenureFlag pretenure) {
   if (length < 0 || length > SeqOneByteString::kMaxLength) {
@@ -5550,11 +5616,11 @@
 #endif
 
 
-MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
+MaybeObject* Heap::LookupUtf8Symbol(Vector<const char> string) {
   Object* symbol = NULL;
   Object* new_table;
   { MaybeObject* maybe_new_table =
-        symbol_table()->LookupSymbol(string, &symbol);
+        symbol_table()->LookupUtf8Symbol(string, &symbol);
     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
   }
   // Can't use set_symbol_table because SymbolTable::cast knows that
@@ -5565,11 +5631,11 @@
 }
 
 
-MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
+MaybeObject* Heap::LookupOneByteSymbol(Vector<const char> string) {
   Object* symbol = NULL;
   Object* new_table;
   { MaybeObject* maybe_new_table =
-        symbol_table()->LookupAsciiSymbol(string, &symbol);
+        symbol_table()->LookupOneByteSymbol(string, &symbol);
     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
   }
   // Can't use set_symbol_table because SymbolTable::cast knows that
@@ -5580,13 +5646,13 @@
 }
 
 
-MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqOneByteString> string,
+MaybeObject* Heap::LookupOneByteSymbol(Handle<SeqOneByteString> string,
                                      int from,
                                      int length) {
   Object* symbol = NULL;
   Object* new_table;
   { MaybeObject* maybe_new_table =
-        symbol_table()->LookupSubStringAsciiSymbol(string,
+        symbol_table()->LookupSubStringOneByteSymbol(string,
                                                    from,
                                                    length,
                                                    &symbol);
@@ -5870,6 +5936,7 @@
       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
     // Scavenge collections have special processing for this.
     external_string_table_.Iterate(v);
+    error_object_list_.Iterate(v);
   }
   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
 }
@@ -6243,6 +6310,8 @@
 
   external_string_table_.TearDown();
 
+  error_object_list_.TearDown();
+
   new_space_.TearDown();
 
   if (old_pointer_space_ != NULL) {
@@ -7149,6 +7218,8 @@
     }
   }
   new_space_strings_.Rewind(last);
+  new_space_strings_.Trim();
+
   last = 0;
   for (int i = 0; i < old_space_strings_.length(); ++i) {
     if (old_space_strings_[i] == heap_->the_hole_value()) {
@@ -7158,6 +7229,7 @@
     old_space_strings_[last++] = old_space_strings_[i];
   }
   old_space_strings_.Rewind(last);
+  old_space_strings_.Trim();
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     Verify();
@@ -7172,6 +7244,114 @@
 }
 
 
+// Update all references, following forwarding pointers left by the GC.
+void ErrorObjectList::UpdateReferences() {
+  for (int i = 0; i < list_.length(); i++) {
+    HeapObject* object = HeapObject::cast(list_[i]);
+    MapWord first_word = object->map_word();
+    if (first_word.IsForwardingAddress()) {
+      list_[i] = first_word.ToForwardingAddress();
+    }
+  }
+}
+
+
+// Unforwarded objects in new space are dead and removed from the list.
+void ErrorObjectList::UpdateReferencesInNewSpace(Heap* heap) {
+  if (!nested_) {
+    int write_index = 0;
+    for (int i = 0; i < list_.length(); i++) {
+      MapWord first_word = HeapObject::cast(list_[i])->map_word();
+      if (first_word.IsForwardingAddress()) {
+        list_[write_index++] = first_word.ToForwardingAddress();
+      }
+    }
+    list_.Rewind(write_index);
+  } else {
+    // If a GC is triggered during DeferredFormatStackTrace, we do not move
+    // objects in the list, but only remove dead ones, so as not to confuse
+    // the loop in DeferredFormatStackTrace.
+    for (int i = 0; i < list_.length(); i++) {
+      MapWord first_word = HeapObject::cast(list_[i])->map_word();
+      list_[i] = first_word.IsForwardingAddress()
+                     ? first_word.ToForwardingAddress()
+                     : heap->the_hole_value();
+    }
+  }
+}
+
+
+void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
+  // If formatting the stack trace causes a GC, this method will be
+  // recursively called.  In that case, skip the recursive call, since
+  // the loop modifies the list while iterating over it.
+  if (nested_ || isolate->has_pending_exception()) return;
+  nested_ = true;
+  HandleScope scope(isolate);
+  Handle<String> stack_key = isolate->factory()->stack_symbol();
+  int write_index = 0;
+  int budget = kBudgetPerGC;
+  for (int i = 0; i < list_.length(); i++) {
+    Object* object = list_[i];
+    JSFunction* getter_fun;
+
+    { AssertNoAllocation assert;
+      // Skip possible holes in the list.
+      if (object->IsTheHole()) continue;
+      if (isolate->heap()->InNewSpace(object) || budget == 0) {
+        list_[write_index++] = object;
+        continue;
+      }
+
+      // Check whether the stack property is backed by the original getter.
+      LookupResult lookup(isolate);
+      JSObject::cast(object)->LocalLookupRealNamedProperty(*stack_key, &lookup);
+      if (!lookup.IsFound() || lookup.type() != CALLBACKS) continue;
+      Object* callback = lookup.GetCallbackObject();
+      if (!callback->IsAccessorPair()) continue;
+      Object* getter_obj = AccessorPair::cast(callback)->getter();
+      if (!getter_obj->IsJSFunction()) continue;
+      getter_fun = JSFunction::cast(getter_obj);
+      String* key = isolate->heap()->hidden_stack_trace_symbol();
+      if (key != getter_fun->GetHiddenProperty(key)) continue;
+    }
+
+    budget--;
+    HandleScope scope(isolate);
+    bool has_exception = false;
+    Handle<Object> object_handle(object, isolate);
+    Handle<Object> getter_handle(getter_fun, isolate);
+    Execution::Call(getter_handle, object_handle, 0, NULL, &has_exception);
+    if (has_exception) {
+      // Hit an exception (most likely a stack overflow).
+      // Wrap up this pass and retry after another GC.
+      isolate->clear_pending_exception();
+      // We use the handle since calling the getter might have caused a GC.
+      list_[write_index++] = *object_handle;
+      budget = 0;
+    }
+  }
+  list_.Rewind(write_index);
+  list_.Trim();
+  nested_ = false;
+}
+
+
+void ErrorObjectList::RemoveUnmarked(Heap* heap) {
+  for (int i = 0; i < list_.length(); i++) {
+    HeapObject* object = HeapObject::cast(list_[i]);
+    if (!Marking::MarkBitFrom(object).Get()) {
+      list_[i] = heap->the_hole_value();
+    }
+  }
+}
+
+
+void ErrorObjectList::TearDown() {
+  list_.Free();
+}
+
+
 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
   chunk->set_next_chunk(chunks_queued_for_free_);
   chunks_queued_for_free_ = chunk;
diff --git a/src/heap.h b/src/heap.h
index 72035ca..c9a17ab 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -209,6 +209,7 @@
   V(char_at_symbol, "CharAt")                                            \
   V(undefined_symbol, "undefined")                                       \
   V(value_of_symbol, "valueOf")                                          \
+  V(stack_symbol, "stack")                                               \
   V(InitializeVarGlobal_symbol, "InitializeVarGlobal")                   \
   V(InitializeConstGlobal_symbol, "InitializeConstGlobal")               \
   V(KeyedLoadElementMonomorphic_symbol,                                  \
@@ -427,6 +428,41 @@
 };
 
 
+// The stack property of an error object is implemented as a getter that
+// formats the attached raw stack trace into a string.  This raw stack trace
+// keeps code and function objects alive until the getter is called for the
+// first time.  To release those objects, we call the getter after each GC for
+// newly tenured error objects that are kept in a list.
+class ErrorObjectList {
+ public:
+  inline void Add(JSObject* object);
+
+  inline void Iterate(ObjectVisitor* v);
+
+  void TearDown();
+
+  void RemoveUnmarked(Heap* heap);
+
+  void DeferredFormatStackTrace(Isolate* isolate);
+
+  void UpdateReferences();
+
+  void UpdateReferencesInNewSpace(Heap* heap);
+
+ private:
+  static const int kBudgetPerGC = 16;
+
+  ErrorObjectList() : nested_(false) { }
+
+  friend class Heap;
+
+  List<Object*> list_;
+  bool nested_;
+
+  DISALLOW_COPY_AND_ASSIGN(ErrorObjectList);
+};
+
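
The class above is an instance of a budgeted, reentrancy-guarded worklist: items accumulate in a list, each GC pays down at most kBudgetPerGC of them, and a nested_ flag stops a pass from re-entering itself when processing an item triggers another GC. A minimal standalone sketch of that pattern (illustrative names only, not V8 API; the real class also keeps new-space objects for later):

#include <cstddef>
#include <functional>
#include <vector>

class DeferredWorklist {
 public:
  void Add(int item) { list_.push_back(item); }

  // Process at most `budget` items per call; skip re-entrant calls, since
  // processing an item may trigger another round (a GC, in V8's case).
  void ProcessSome(const std::function<void(int)>& process, int budget = 16) {
    if (nested_) return;
    nested_ = true;
    std::size_t write = 0;
    for (std::size_t i = 0; i < list_.size(); i++) {
      if (budget == 0) { list_[write++] = list_[i]; continue; }  // keep
      budget--;
      process(list_[i]);  // may call back into ProcessSome; guarded above
    }
    list_.resize(write);  // like Rewind() + Trim()
    nested_ = false;
  }

 private:
  std::vector<int> list_;
  bool nested_ = false;
};
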
+
 enum ArrayStorageAllocationMode {
   DONT_INITIALIZE_ARRAY_ELEMENTS,
   INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
@@ -728,12 +764,16 @@
         Vector<const uc16> str,
         uint32_t hash_field);
 
-  MUST_USE_RESULT MaybeObject* AllocateInternalSymbol(
-      unibrow::CharacterStream* buffer, int chars, uint32_t hash_field);
+  template<typename T>
+  static inline bool IsOneByte(T t, int chars);
 
-  MUST_USE_RESULT MaybeObject* AllocateExternalSymbol(
-      Vector<const char> str,
-      int chars);
+  template<typename T>
+  MUST_USE_RESULT inline MaybeObject* AllocateInternalSymbol(
+      T t, int chars, uint32_t hash_field);
+
+  template<bool is_one_byte, typename T>
+  MUST_USE_RESULT MaybeObject* AllocateInternalSymbol(
+      T t, int chars, uint32_t hash_field);
 
   // Allocates and partially initializes a String.  There are two String
   // encodings: ASCII and two-byte.  These functions allocate a string of the
@@ -1030,14 +1070,14 @@
   // Returns Failure::RetryAfterGC(requested_bytes, space) if allocation
   // failed.
   // Please note this function does not perform a garbage collection.
-  MUST_USE_RESULT MaybeObject* LookupSymbol(Vector<const char> str);
-  MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str);
-  MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(Vector<const uc16> str);
-  MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(const char* str) {
-    return LookupSymbol(CStrVector(str));
+  MUST_USE_RESULT MaybeObject* LookupUtf8Symbol(Vector<const char> str);
+  MUST_USE_RESULT MaybeObject* LookupUtf8Symbol(const char* str) {
+    return LookupUtf8Symbol(CStrVector(str));
   }
+  MUST_USE_RESULT MaybeObject* LookupOneByteSymbol(Vector<const char> str);
+  MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(Vector<const uc16> str);
   MUST_USE_RESULT MaybeObject* LookupSymbol(String* str);
-  MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(
+  MUST_USE_RESULT MaybeObject* LookupOneByteSymbol(
       Handle<SeqOneByteString> string, int from, int length);
 
   bool LookupSymbolIfExists(String* str, String** symbol);
@@ -1573,6 +1613,10 @@
     return &external_string_table_;
   }
 
+  ErrorObjectList* error_object_list() {
+    return &error_object_list_;
+  }
+
   // Returns the current sweep generation.
   int sweep_generation() {
     return sweep_generation_;
@@ -2149,6 +2193,8 @@
 
   ExternalStringTable external_string_table_;
 
+  ErrorObjectList error_object_list_;
+
   VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
 
   MemoryChunk* chunks_queued_for_free_;
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 0e6ea00..a05fa20 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1031,8 +1031,10 @@
 
 void HJSArrayLength::PrintDataTo(StringStream* stream) {
   value()->PrintNameTo(stream);
-  stream->Add(" ");
-  typecheck()->PrintNameTo(stream);
+  if (HasTypeCheck()) {
+    stream->Add(" ");
+    typecheck()->PrintNameTo(stream);
+  }
 }
 
 
@@ -1144,8 +1146,10 @@
 
 void HLoadElements::PrintDataTo(StringStream* stream) {
   value()->PrintNameTo(stream);
-  stream->Add(" ");
-  typecheck()->PrintNameTo(stream);
+  if (HasTypeCheck()) {
+    stream->Add(" ");
+    typecheck()->PrintNameTo(stream);
+  }
 }
 
 
@@ -1338,6 +1342,11 @@
     if (a->CanBeMinusZero() || a->CanBeNegative()) {
       result->set_can_be_minus_zero(true);
     }
+
+    if (right()->range()->Includes(-1) && left()->range()->Includes(kMinInt)) {
+      SetFlag(HValue::kCanOverflow);
+    }
+
     if (!right()->range()->CanBeZero()) {
       ClearFlag(HValue::kCanBeDivByZero);
     }
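
The new overflow check covers the one signed-division pair that cannot be represented: in 32-bit two's complement, kMinInt is -2^31 while the largest value is 2^31 - 1, so kMinInt / -1 mathematically yields 2^31, which does not fit (and is undefined behavior in C++). A small demonstration, computed safely in 64 bits:

#include <climits>
#include <cstdio>

int main() {
  // INT_MIN is -2147483648 but INT_MAX is only 2147483647, so the exact
  // result of INT_MIN / -1 (2147483648) is unrepresentable; evaluating it
  // directly is undefined behavior (on x86 it traps like division by zero).
  long long exact = -static_cast<long long>(INT_MIN);  // safe in 64 bits
  std::printf("exact result: %lld, INT_MAX: %d\n", exact, INT_MAX);
  return 0;
}
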
@@ -2023,12 +2032,16 @@
   stream->Add("[");
   key()->PrintNameTo(stream);
   if (IsDehoisted()) {
-    stream->Add(" + %d] ", index_offset());
+    stream->Add(" + %d]", index_offset());
   } else {
-    stream->Add("] ");
+    stream->Add("]");
   }
 
-  dependency()->PrintNameTo(stream);
+  if (HasDependency()) {
+    stream->Add(" ");
+    dependency()->PrintNameTo(stream);
+  }
+
   if (RequiresHoleCheck()) {
     stream->Add(" check_hole");
   }
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 161e654..d8f5dec 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -1937,7 +1937,7 @@
     // object. It is guaranteed to be 32 bit integer, but it can be
     // represented as either a smi or heap number.
     SetOperandAt(0, value);
-    SetOperandAt(1, typecheck);
+    SetOperandAt(1, typecheck != NULL ? typecheck : value);
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
     SetGVNFlag(kDependsOnArrayLengths);
@@ -1951,7 +1951,11 @@
   virtual void PrintDataTo(StringStream* stream);
 
   HValue* value() { return OperandAt(0); }
-  HValue* typecheck() { return OperandAt(1); }
+  HValue* typecheck() {
+    ASSERT(HasTypeCheck());
+    return OperandAt(1);
+  }
+  bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
 
   DECLARE_CONCRETE_INSTRUCTION(JSArrayLength)
 
@@ -2152,14 +2156,18 @@
  public:
   HLoadElements(HValue* value, HValue* typecheck) {
     SetOperandAt(0, value);
-    SetOperandAt(1, typecheck);
+    SetOperandAt(1, typecheck != NULL ? typecheck : value);
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
     SetGVNFlag(kDependsOnElementsPointer);
   }
 
   HValue* value() { return OperandAt(0); }
-  HValue* typecheck() { return OperandAt(1); }
+  HValue* typecheck() {
+    ASSERT(HasTypeCheck());
+    return OperandAt(1);
+  }
+  bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
 
   virtual void PrintDataTo(StringStream* stream);
 
@@ -4366,7 +4374,7 @@
 
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
-    SetOperandAt(2, dependency);
+    SetOperandAt(2, dependency != NULL ? dependency : obj);
 
     if (!is_external()) {
       // I can detect the case between storing double (holey and fast) and
@@ -4407,7 +4415,11 @@
   }
   HValue* elements() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
-  HValue* dependency() { return OperandAt(2); }
+  HValue* dependency() {
+    ASSERT(HasDependency());
+    return OperandAt(2);
+  }
+  bool HasDependency() const { return OperandAt(0) != OperandAt(2); }
   uint32_t index_offset() { return IndexOffsetField::decode(bit_field_); }
   void SetIndexOffset(uint32_t index_offset) {
     bit_field_ = IndexOffsetField::update(bit_field_, index_offset);
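
Storing "typecheck != NULL ? typecheck : value" instead of a NULL operand reflects a Hydrogen invariant: operand slots are always dereferenced by the use-list machinery, so an optional operand is encoded by aliasing it to an operand that is always present, and its presence is recovered by pointer comparison (the HasTypeCheck and HasDependency accessors above). A minimal sketch of the aliasing trick (illustrative types, not V8 API):

#include <cassert>
#include <cstdio>

struct Node {
  // Slot 1 is optional, but use lists in this style cannot hold NULL,
  // so an absent operand is encoded by aliasing slot 1 to slot 0.
  Node* operands[2];

  Node(Node* value, Node* optional) {
    operands[0] = value;
    operands[1] = (optional != nullptr) ? optional : value;  // self-alias
  }

  bool HasOptional() const { return operands[0] != operands[1]; }
  Node* optional() { assert(HasOptional()); return operands[1]; }
};

int main() {
  Node leaf(nullptr, nullptr);   // stands in for an arbitrary operand
  Node check(&leaf, nullptr);    // some other instruction
  Node with(&leaf, &check);      // optional operand present
  Node without(&leaf, nullptr);  // absent, yet no NULL is ever stored
  std::printf("%d %d\n", with.HasOptional(), without.HasOptional());  // 1 0
  return 0;
}
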
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 97306a1..bf508da 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -138,6 +138,7 @@
   ASSERT(HasEnvironment());
   HEnvironment* environment = last_environment();
   ASSERT(ast_id.IsNone() ||
+         ast_id == BailoutId::StubEntry() ||
          environment->closure()->shared()->VerifyBailoutId(ast_id));
 
   int push_count = environment->push_count();
@@ -621,33 +622,204 @@
 }
 
 
-HGraphBuilder::HGraphBuilder(CompilationInfo* info,
-                             TypeFeedbackOracle* oracle)
-    : function_state_(NULL),
+HGraph* HGraphBuilder::CreateGraph() {
+  graph_ = new(zone()) HGraph(info_);
+  if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info_);
+  HPhase phase("H_Block building");
+  set_current_block(graph()->entry_block());
+  if (!BuildGraph()) return NULL;
+  return graph_;
+}
+
+
+HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
+  ASSERT(current_block() != NULL);
+  current_block()->AddInstruction(instr);
+  return instr;
+}
+
+
+void HGraphBuilder::AddSimulate(BailoutId id,
+                                RemovableSimulate removable) {
+  ASSERT(current_block() != NULL);
+  current_block()->AddSimulate(id, removable);
+}
+
+
+HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
+    HValue* external_elements,
+    HValue* checked_key,
+    HValue* val,
+    HValue* dependency,
+    ElementsKind elements_kind,
+    bool is_store) {
+  Zone* zone = this->zone();
+  if (is_store) {
+    ASSERT(val != NULL);
+    switch (elements_kind) {
+      case EXTERNAL_PIXEL_ELEMENTS: {
+        val = AddInstruction(new(zone) HClampToUint8(val));
+        break;
+      }
+      case EXTERNAL_BYTE_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      case EXTERNAL_SHORT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      case EXTERNAL_INT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+        break;
+      }
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+        break;
+      case FAST_SMI_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case NON_STRICT_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+    return new(zone) HStoreKeyed(external_elements, checked_key,
+                                 val, elements_kind);
+  } else {
+    ASSERT(val == NULL);
+    HLoadKeyed* load =
+       new(zone) HLoadKeyed(
+           external_elements, checked_key, dependency, elements_kind);
+    if (FLAG_opt_safe_uint32_operations &&
+        elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+      graph()->RecordUint32Instruction(load);
+    }
+    return load;
+  }
+}
+
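
The EXTERNAL_PIXEL_ELEMENTS case above is the only store path that inserts HClampToUint8: pixel (canvas image data) stores clamp out-of-range values to [0, 255], whereas the other external byte kinds simply take the low bits. A small sketch of the difference for integer inputs (the real instruction additionally handles doubles):

#include <cstdio>

static unsigned char ClampToUint8(int v) {
  if (v < 0) return 0;
  if (v > 255) return 255;
  return static_cast<unsigned char>(v);
}

int main() {
  std::printf("%d %d %d\n", ClampToUint8(-5), ClampToUint8(300),
              ClampToUint8(42));  // prints: 0 255 42
  // By contrast, a plain byte store wraps: (unsigned char)300 == 44.
  std::printf("%d\n", static_cast<unsigned char>(300));
  return 0;
}
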
+
+HInstruction* HGraphBuilder::BuildFastElementAccess(
+    HValue* elements,
+    HValue* checked_key,
+    HValue* val,
+    HValue* load_dependency,
+    ElementsKind elements_kind,
+    bool is_store) {
+  Zone* zone = this->zone();
+  if (is_store) {
+    ASSERT(val != NULL);
+    switch (elements_kind) {
+      case FAST_SMI_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+        // Smi-only arrays need a smi check.
+        AddInstruction(new(zone) HCheckSmi(val));
+        // Fall through.
+      case FAST_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+        return new(zone) HStoreKeyed(elements, checked_key, val, elements_kind);
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
+  // It's an element load (!is_store).
+  return new(zone) HLoadKeyed(elements,
+                              checked_key,
+                              load_dependency,
+                              elements_kind);
+}
+
+
+HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
+    HValue* object,
+    HValue* key,
+    HValue* val,
+    HCheckMaps* mapcheck,
+    bool is_js_array,
+    ElementsKind elements_kind,
+    bool is_store) {
+  Zone* zone = this->zone();
+  // No GVNFlag is necessary for ElementsKind if there is an explicit
+  // dependency on an HElementsTransition instruction. The flag can also be
+  // removed if the map to check has FAST_HOLEY_ELEMENTS, since there can be
+  // no further ElementsKind transitions. Finally, the dependency can be
+  // removed for FAST_ELEMENTS stores, since a transition to HOLEY elements
+  // won't change the generated store code.
+  if ((elements_kind == FAST_HOLEY_ELEMENTS) ||
+      (elements_kind == FAST_ELEMENTS && is_store)) {
+    if (mapcheck != NULL) {
+      mapcheck->ClearGVNFlag(kDependsOnElementsKind);
+    }
+  }
+  bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
+  bool fast_elements = IsFastObjectElementsKind(elements_kind);
+  HInstruction* elements =
+      AddInstruction(new(zone) HLoadElements(object, mapcheck));
+  if (is_store && (fast_elements || fast_smi_only_elements)) {
+    HCheckMaps* check_cow_map = new(zone) HCheckMaps(
+        elements, Isolate::Current()->factory()->fixed_array_map(), zone);
+    check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
+    AddInstruction(check_cow_map);
+  }
+  HInstruction* length = NULL;
+  HInstruction* checked_key = NULL;
+  if (IsExternalArrayElementsKind(elements_kind)) {
+    length = AddInstruction(new(zone) HFixedArrayBaseLength(elements));
+    checked_key = AddInstruction(new(zone) HBoundsCheck(key, length,
+                                                        ALLOW_SMI_KEY));
+    HLoadExternalArrayPointer* external_elements =
+        new(zone) HLoadExternalArrayPointer(elements);
+    AddInstruction(external_elements);
+    return BuildExternalArrayElementAccess(
+        external_elements, checked_key, val, mapcheck,
+        elements_kind, is_store);
+  }
+  ASSERT(fast_smi_only_elements ||
+         fast_elements ||
+         IsFastDoubleElementsKind(elements_kind));
+  if (is_js_array) {
+    length = AddInstruction(new(zone) HJSArrayLength(object, mapcheck,
+                                                     HType::Smi()));
+  } else {
+    length = AddInstruction(new(zone) HFixedArrayBaseLength(elements));
+  }
+  checked_key = AddInstruction(new(zone) HBoundsCheck(key, length,
+                                                      ALLOW_SMI_KEY));
+  return BuildFastElementAccess(elements, checked_key, val, mapcheck,
+                                elements_kind, is_store);
+}
+
+
+HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info,
+                                               TypeFeedbackOracle* oracle)
+    : HGraphBuilder(info),
+      function_state_(NULL),
       initial_function_state_(this, info, oracle, NORMAL_RETURN),
       ast_context_(NULL),
       break_scope_(NULL),
-      graph_(NULL),
-      current_block_(NULL),
       inlined_count_(0),
       globals_(10, info->zone()),
-      zone_(info->zone()),
       inline_bailout_(false) {
   // This is not initialized in the initializer list because the
   // constructor for the initial state relies on function_state_ == NULL
   // to know it's the initial state.
   function_state_= &initial_function_state_;
+  InitializeAstVisitor();
 }
 
-HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first,
-                                       HBasicBlock* second,
-                                       BailoutId join_id) {
+
+HBasicBlock* HOptimizedGraphBuilder::CreateJoin(HBasicBlock* first,
+                                                HBasicBlock* second,
+                                                BailoutId join_id) {
   if (first == NULL) {
     return second;
   } else if (second == NULL) {
     return first;
   } else {
-    HBasicBlock* join_block = graph_->CreateBasicBlock();
+    HBasicBlock* join_block = graph()->CreateBasicBlock();
     first->Goto(join_block);
     second->Goto(join_block);
     join_block->SetJoinId(join_id);
@@ -656,9 +828,9 @@
 }
 
 
-HBasicBlock* HGraphBuilder::JoinContinue(IterationStatement* statement,
-                                         HBasicBlock* exit_block,
-                                         HBasicBlock* continue_block) {
+HBasicBlock* HOptimizedGraphBuilder::JoinContinue(IterationStatement* statement,
+                                                  HBasicBlock* exit_block,
+                                                  HBasicBlock* continue_block) {
   if (continue_block != NULL) {
     if (exit_block != NULL) exit_block->Goto(continue_block);
     continue_block->SetJoinId(statement->ContinueId());
@@ -668,11 +840,11 @@
 }
 
 
-HBasicBlock* HGraphBuilder::CreateLoop(IterationStatement* statement,
-                                       HBasicBlock* loop_entry,
-                                       HBasicBlock* body_exit,
-                                       HBasicBlock* loop_successor,
-                                       HBasicBlock* break_block) {
+HBasicBlock* HOptimizedGraphBuilder::CreateLoop(IterationStatement* statement,
+                                                HBasicBlock* loop_entry,
+                                                HBasicBlock* body_exit,
+                                                HBasicBlock* loop_successor,
+                                                HBasicBlock* break_block) {
   if (body_exit != NULL) body_exit->Goto(loop_entry);
   loop_entry->PostProcessLoopHeader(statement);
   if (break_block != NULL) {
@@ -703,8 +875,13 @@
       is_recursive_(false),
       use_optimistic_licm_(false),
       type_change_checksum_(0) {
-  start_environment_ =
-      new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
+  if (info->IsStub()) {
+    start_environment_ =
+        new(zone_) HEnvironment(zone_);
+  } else {
+    start_environment_ =
+        new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
+  }
   start_environment_->set_ast_id(BailoutId::FunctionEntry());
   entry_block_ = CreateBasicBlock();
   entry_block_->SetInitialEnvironment(start_environment_);
@@ -2893,7 +3070,7 @@
 
 // Implementation of utility class to encapsulate the translation state for
 // a (possibly inlined) function.
-FunctionState::FunctionState(HGraphBuilder* owner,
+FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
                              CompilationInfo* info,
                              TypeFeedbackOracle* oracle,
                              InliningKind inlining_kind)
@@ -2942,7 +3119,7 @@
 
 // Implementation of utility classes to represent an expression's context in
 // the AST.
-AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind)
+AstContext::AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind)
     : owner_(owner),
       kind_(kind),
       outer_(owner->ast_context()),
@@ -3053,7 +3230,7 @@
 
 void TestContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
   ASSERT(!instr->IsControlInstruction());
-  HGraphBuilder* builder = owner();
+  HOptimizedGraphBuilder* builder = owner();
   builder->AddInstruction(instr);
   // We expect a simulate after every expression with side effects, though
   // this one isn't actually needed (and wouldn't work if it were targeted).
@@ -3084,7 +3261,7 @@
   // connects a branch node to a join node.  We conservatively ensure that
   // property by always adding an empty block on the outgoing edges of this
   // branch.
-  HGraphBuilder* builder = owner();
+  HOptimizedGraphBuilder* builder = owner();
   if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
     builder->Bailout("arguments object value in a test context");
   }
@@ -3101,7 +3278,7 @@
 }
 
 
-// HGraphBuilder infrastructure for bailing out and checking bailouts.
+// HOptimizedGraphBuilder infrastructure for bailing out and checking bailouts.
 #define CHECK_BAILOUT(call)                     \
   do {                                          \
     call;                                       \
@@ -3116,25 +3293,26 @@
   } while (false)
 
 
-void HGraphBuilder::Bailout(const char* reason) {
+void HOptimizedGraphBuilder::Bailout(const char* reason) {
   info()->set_bailout_reason(reason);
   SetStackOverflow();
 }
 
 
-void HGraphBuilder::VisitForEffect(Expression* expr) {
+void HOptimizedGraphBuilder::VisitForEffect(Expression* expr) {
   EffectContext for_effect(this);
   Visit(expr);
 }
 
 
-void HGraphBuilder::VisitForValue(Expression* expr, ArgumentsAllowedFlag flag) {
+void HOptimizedGraphBuilder::VisitForValue(Expression* expr,
+                                           ArgumentsAllowedFlag flag) {
   ValueContext for_value(this, flag);
   Visit(expr);
 }
 
 
-void HGraphBuilder::VisitForTypeOf(Expression* expr) {
+void HOptimizedGraphBuilder::VisitForTypeOf(Expression* expr) {
   ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
   for_value.set_for_typeof(true);
   Visit(expr);
@@ -3142,113 +3320,108 @@
 
 
 
-void HGraphBuilder::VisitForControl(Expression* expr,
-                                    HBasicBlock* true_block,
-                                    HBasicBlock* false_block) {
+void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
+                                             HBasicBlock* true_block,
+                                             HBasicBlock* false_block) {
   TestContext for_test(this, expr, oracle(), true_block, false_block);
   Visit(expr);
 }
 
 
-void HGraphBuilder::VisitArgument(Expression* expr) {
+void HOptimizedGraphBuilder::VisitArgument(Expression* expr) {
   CHECK_ALIVE(VisitForValue(expr));
   Push(AddInstruction(new(zone()) HPushArgument(Pop())));
 }
 
 
-void HGraphBuilder::VisitArgumentList(ZoneList<Expression*>* arguments) {
+void HOptimizedGraphBuilder::VisitArgumentList(
+    ZoneList<Expression*>* arguments) {
   for (int i = 0; i < arguments->length(); i++) {
     CHECK_ALIVE(VisitArgument(arguments->at(i)));
   }
 }
 
 
-void HGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs) {
+void HOptimizedGraphBuilder::VisitExpressions(
+    ZoneList<Expression*>* exprs) {
   for (int i = 0; i < exprs->length(); ++i) {
     CHECK_ALIVE(VisitForValue(exprs->at(i)));
   }
 }
 
 
-HGraph* HGraphBuilder::CreateGraph() {
-  graph_ = new(zone()) HGraph(info());
-  if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info());
+bool HOptimizedGraphBuilder::BuildGraph() {
+  Scope* scope = info()->scope();
+  if (scope->HasIllegalRedeclaration()) {
+    Bailout("function with illegal redeclaration");
+    return false;
+  }
+  if (scope->calls_eval()) {
+    Bailout("function calls eval");
+    return false;
+  }
+  SetUpScope(scope);
 
-  {
-    HPhase phase("H_Block building");
-    current_block_ = graph()->entry_block();
+  // Add an edge to the body entry.  This is warty: the graph's start
+  // environment will be used by the Lithium translation as the initial
+  // environment on graph entry, but it has now been mutated by the
+  // Hydrogen translation of the instructions in the start block.  This
+  // environment uses values which have not been defined yet.  These
+  // Hydrogen instructions will then be replayed by the Lithium
+  // translation, so they cannot have an environment effect.  The edge to
+  // the body's entry block (along with some special logic for the start
+  // block in HInstruction::InsertAfter) seals the start block from
+  // getting unwanted instructions inserted.
+  //
+  // TODO(kmillikin): Fix this.  Stop mutating the initial environment.
+  // Make the Hydrogen instructions in the initial block into Hydrogen
+  // values (but not instructions), present in the initial environment and
+  // not replayed by the Lithium translation.
+  HEnvironment* initial_env = environment()->CopyWithoutHistory();
+  HBasicBlock* body_entry = CreateBasicBlock(initial_env);
+  current_block()->Goto(body_entry);
+  body_entry->SetJoinId(BailoutId::FunctionEntry());
+  set_current_block(body_entry);
 
-    Scope* scope = info()->scope();
-    if (scope->HasIllegalRedeclaration()) {
-      Bailout("function with illegal redeclaration");
-      return NULL;
-    }
-    if (scope->calls_eval()) {
-      Bailout("function calls eval");
-      return NULL;
-    }
-    SetUpScope(scope);
+  // Handle implicit declaration of the function name in named function
+  // expressions before other declarations.
+  if (scope->is_function_scope() && scope->function() != NULL) {
+    VisitVariableDeclaration(scope->function());
+  }
+  VisitDeclarations(scope->declarations());
+  AddSimulate(BailoutId::Declarations());
 
-    // Add an edge to the body entry.  This is warty: the graph's start
-    // environment will be used by the Lithium translation as the initial
-    // environment on graph entry, but it has now been mutated by the
-    // Hydrogen translation of the instructions in the start block.  This
-    // environment uses values which have not been defined yet.  These
-    // Hydrogen instructions will then be replayed by the Lithium
-    // translation, so they cannot have an environment effect.  The edge to
-    // the body's entry block (along with some special logic for the start
-    // block in HInstruction::InsertAfter) seals the start block from
-    // getting unwanted instructions inserted.
-    //
-    // TODO(kmillikin): Fix this.  Stop mutating the initial environment.
-    // Make the Hydrogen instructions in the initial block into Hydrogen
-    // values (but not instructions), present in the initial environment and
-    // not replayed by the Lithium translation.
-    HEnvironment* initial_env = environment()->CopyWithoutHistory();
-    HBasicBlock* body_entry = CreateBasicBlock(initial_env);
-    current_block()->Goto(body_entry);
-    body_entry->SetJoinId(BailoutId::FunctionEntry());
-    set_current_block(body_entry);
+  HValue* context = environment()->LookupContext();
+  AddInstruction(
+      new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
 
-    // Handle implicit declaration of the function name in named function
-    // expressions before other declarations.
-    if (scope->is_function_scope() && scope->function() != NULL) {
-      VisitVariableDeclaration(scope->function());
-    }
-    VisitDeclarations(scope->declarations());
-    AddSimulate(BailoutId::Declarations());
+  VisitStatements(info()->function()->body());
+  if (HasStackOverflow()) return false;
 
-    HValue* context = environment()->LookupContext();
-    AddInstruction(
-        new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
-
-    VisitStatements(info()->function()->body());
-    if (HasStackOverflow()) return NULL;
-
-    if (current_block() != NULL) {
-      HReturn* instr = new(zone()) HReturn(graph()->GetConstantUndefined());
-      current_block()->FinishExit(instr);
-      set_current_block(NULL);
-    }
-
-    // If the checksum of the number of type info changes is the same as the
-    // last time this function was compiled, then this recompile is likely not
-    // due to missing/inadequate type feedback, but rather too aggressive
-    // optimization. Disable optimistic LICM in that case.
-    Handle<Code> unoptimized_code(info()->shared_info()->code());
-    ASSERT(unoptimized_code->kind() == Code::FUNCTION);
-    Handle<TypeFeedbackInfo> type_info(
-        TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
-    int checksum = type_info->own_type_change_checksum();
-    int composite_checksum = graph()->update_type_change_checksum(checksum);
-    graph()->set_use_optimistic_licm(
-        !type_info->matches_inlined_type_change_checksum(composite_checksum));
-    type_info->set_inlined_type_change_checksum(composite_checksum);
+  if (current_block() != NULL) {
+    HReturn* instr = new(zone()) HReturn(graph()->GetConstantUndefined());
+    current_block()->FinishExit(instr);
+    set_current_block(NULL);
   }
 
-  return graph();
+  // If the checksum of the number of type info changes is the same as the
+  // last time this function was compiled, then this recompile is likely not
+  // due to missing/inadequate type feedback, but rather too aggressive
+  // optimization. Disable optimistic LICM in that case.
+  Handle<Code> unoptimized_code(info()->shared_info()->code());
+  ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+  Handle<TypeFeedbackInfo> type_info(
+      TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
+  int checksum = type_info->own_type_change_checksum();
+  int composite_checksum = graph()->update_type_change_checksum(checksum);
+  graph()->set_use_optimistic_licm(
+      !type_info->matches_inlined_type_change_checksum(composite_checksum));
+  type_info->set_inlined_type_change_checksum(composite_checksum);
+
+  return true;
 }
 
+
 bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
   *bailout_reason = SmartArrayPointer<char>();
   OrderBlocks();
@@ -3796,33 +3969,20 @@
 }
 
 
-HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
-  ASSERT(current_block() != NULL);
-  current_block()->AddInstruction(instr);
-  return instr;
-}
-
-
-void HGraphBuilder::AddSimulate(BailoutId ast_id, RemovableSimulate removable) {
-  ASSERT(current_block() != NULL);
-  current_block()->AddSimulate(ast_id, removable);
-}
-
-
-void HGraphBuilder::AddPhi(HPhi* instr) {
+void HOptimizedGraphBuilder::AddPhi(HPhi* instr) {
   ASSERT(current_block() != NULL);
   current_block()->AddPhi(instr);
 }
 
 
-void HGraphBuilder::PushAndAdd(HInstruction* instr) {
+void HOptimizedGraphBuilder::PushAndAdd(HInstruction* instr) {
   Push(instr);
   AddInstruction(instr);
 }
 
 
 template <class Instruction>
-HInstruction* HGraphBuilder::PreProcessCall(Instruction* call) {
+HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
   int count = call->argument_count();
   ZoneList<HValue*> arguments(count, zone());
   for (int i = 0; i < count; ++i) {
@@ -3836,11 +3996,11 @@
 }
 
 
-void HGraphBuilder::SetUpScope(Scope* scope) {
+void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
   HConstant* undefined_constant = new(zone()) HConstant(
       isolate()->factory()->undefined_value(), Representation::Tagged());
   AddInstruction(undefined_constant);
-  graph_->set_undefined_constant(undefined_constant);
+  graph()->set_undefined_constant(undefined_constant);
 
   HArgumentsObject* object = new(zone()) HArgumentsObject;
   AddInstruction(object);
@@ -3879,21 +4039,21 @@
 }
 
 
-void HGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
+void HOptimizedGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
   for (int i = 0; i < statements->length(); i++) {
     CHECK_ALIVE(Visit(statements->at(i)));
   }
 }
 
 
-HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
+HBasicBlock* HOptimizedGraphBuilder::CreateBasicBlock(HEnvironment* env) {
   HBasicBlock* b = graph()->CreateBasicBlock();
   b->SetInitialEnvironment(env);
   return b;
 }
 
 
-HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
+HBasicBlock* HOptimizedGraphBuilder::CreateLoopHeaderBlock() {
   HBasicBlock* header = graph()->CreateBasicBlock();
   HEnvironment* entry_env = environment()->CopyAsLoopHeader(header);
   header->SetInitialEnvironment(entry_env);
@@ -3902,7 +4062,7 @@
 }
 
 
-void HGraphBuilder::VisitBlock(Block* stmt) {
+void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -3922,7 +4082,8 @@
 }
 
 
-void HGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+void HOptimizedGraphBuilder::VisitExpressionStatement(
+    ExpressionStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -3930,14 +4091,14 @@
 }
 
 
-void HGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+void HOptimizedGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
 }
 
 
-void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+void HOptimizedGraphBuilder::VisitIfStatement(IfStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -3976,7 +4137,7 @@
 }
 
 
-HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get(
+HBasicBlock* HOptimizedGraphBuilder::BreakAndContinueScope::Get(
     BreakableStatement* stmt,
     BreakType type,
     int* drop_extra) {
@@ -4015,7 +4176,8 @@
 }
 
 
-void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+void HOptimizedGraphBuilder::VisitContinueStatement(
+    ContinueStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4029,7 +4191,7 @@
 }
 
 
-void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4043,7 +4205,7 @@
 }
 
 
-void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4115,7 +4277,7 @@
 }
 
 
-void HGraphBuilder::VisitWithStatement(WithStatement* stmt) {
+void HOptimizedGraphBuilder::VisitWithStatement(WithStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4123,7 +4285,7 @@
 }
 
 
-void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4309,12 +4471,12 @@
 }
 
 
-bool HGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
+bool HOptimizedGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
   return statement->OsrEntryId() == info()->osr_ast_id();
 }
 
 
-bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
+bool HOptimizedGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
   if (!HasOsrEntryAt(statement)) return false;
 
   HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
@@ -4364,9 +4526,9 @@
 }
 
 
-void HGraphBuilder::VisitLoopBody(IterationStatement* stmt,
-                                  HBasicBlock* loop_entry,
-                                  BreakAndContinueInfo* break_info) {
+void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt,
+                                           HBasicBlock* loop_entry,
+                                           BreakAndContinueInfo* break_info) {
   BreakAndContinueScope push(break_info, this);
   AddSimulate(stmt->StackCheckId());
   HValue* context = environment()->LookupContext();
@@ -4379,7 +4541,7 @@
 }
 
 
-void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4422,7 +4584,7 @@
 }
 
 
-void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4466,7 +4628,7 @@
 }
 
 
-void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
+void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4518,7 +4680,7 @@
 }
 
 
-void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
+void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4639,7 +4801,7 @@
 }
 
 
-void HGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+void HOptimizedGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4647,7 +4809,8 @@
 }
 
 
-void HGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+void HOptimizedGraphBuilder::VisitTryFinallyStatement(
+    TryFinallyStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4655,7 +4818,7 @@
 }
 
 
-void HGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+void HOptimizedGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4683,7 +4846,7 @@
 }
 
 
-void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4702,7 +4865,7 @@
 }
 
 
-void HGraphBuilder::VisitSharedFunctionInfoLiteral(
+void HOptimizedGraphBuilder::VisitSharedFunctionInfoLiteral(
     SharedFunctionInfoLiteral* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
@@ -4711,7 +4874,7 @@
 }
 
 
-void HGraphBuilder::VisitConditional(Conditional* expr) {
+void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4749,8 +4912,9 @@
 }
 
 
-HGraphBuilder::GlobalPropertyAccess HGraphBuilder::LookupGlobalProperty(
-    Variable* var, LookupResult* lookup, bool is_store) {
+HOptimizedGraphBuilder::GlobalPropertyAccess
+    HOptimizedGraphBuilder::LookupGlobalProperty(
+        Variable* var, LookupResult* lookup, bool is_store) {
   if (var->is_this() || !info()->has_global_object()) {
     return kUseGeneric;
   }
@@ -4766,7 +4930,7 @@
 }
 
 
-HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) {
+HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
   ASSERT(var->IsContextSlot());
   HValue* context = environment()->LookupContext();
   int length = info()->scope()->ContextChainLength(var->scope());
@@ -4779,7 +4943,7 @@
 }
 
 
-void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4852,7 +5016,7 @@
 }
 
 
-void HGraphBuilder::VisitLiteral(Literal* expr) {
+void HOptimizedGraphBuilder::VisitLiteral(Literal* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -4862,7 +5026,7 @@
 }
 
 
-void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -5021,7 +5185,7 @@
 }
 
 
-void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -5126,7 +5290,7 @@
 }
 
 
-void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -5260,18 +5424,19 @@
 }
 
 
-void HGraphBuilder::AddCheckMapsWithTransitions(HValue* object,
-                                                Handle<Map> map) {
+void HOptimizedGraphBuilder::AddCheckMapsWithTransitions(HValue* object,
+                                                         Handle<Map> map) {
   AddInstruction(new(zone()) HCheckNonSmi(object));
   AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
 }
 
 
-HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
-                                                  Handle<String> name,
-                                                  HValue* value,
-                                                  Handle<Map> map,
-                                                  LookupResult* lookup) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
+    HValue* object,
+    Handle<String> name,
+    HValue* value,
+    Handle<Map> map,
+    LookupResult* lookup) {
   ASSERT(lookup->IsFound());
   // If the property does not exist yet, we have to check that it wasn't made
   // readonly or turned into a setter by some meanwhile modifications on the
@@ -5323,9 +5488,10 @@
 }
 
 
-HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
-                                                    Handle<String> name,
-                                                    HValue* value) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric(
+    HValue* object,
+    Handle<String> name,
+    HValue* value) {
   HValue* context = environment()->LookupContext();
   return new(zone()) HStoreNamedGeneric(
                          context,
@@ -5336,11 +5502,12 @@
 }
 
 
-HInstruction* HGraphBuilder::BuildCallSetter(HValue* object,
-                                             HValue* value,
-                                             Handle<Map> map,
-                                             Handle<JSFunction> setter,
-                                             Handle<JSObject> holder) {
+HInstruction* HOptimizedGraphBuilder::BuildCallSetter(
+    HValue* object,
+    HValue* value,
+    Handle<Map> map,
+    Handle<JSFunction> setter,
+    Handle<JSObject> holder) {
   AddCheckConstantFunction(holder, object, map);
   AddInstruction(new(zone()) HPushArgument(object));
   AddInstruction(new(zone()) HPushArgument(value));
@@ -5348,10 +5515,11 @@
 }
 
 
-HInstruction* HGraphBuilder::BuildStoreNamedMonomorphic(HValue* object,
-                                                        Handle<String> name,
-                                                        HValue* value,
-                                                        Handle<Map> map) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
+    HValue* object,
+    Handle<String> name,
+    HValue* value,
+    Handle<Map> map) {
   // Handle a store to a known field.
   LookupResult lookup(isolate());
   if (ComputeLoadStoreField(map, name, &lookup, true)) {
@@ -5364,10 +5532,11 @@
 }
 
 
-void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
-                                                    HValue* object,
-                                                    SmallMapList* types,
-                                                    Handle<String> name) {
+void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
+    Property* expr,
+    HValue* object,
+    SmallMapList* types,
+    Handle<String> name) {
   int count = 0;
   int previous_field_offset = 0;
   bool previous_field_is_in_object = false;
@@ -5419,11 +5588,12 @@
 }
 
 
-void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
-                                                     HValue* object,
-                                                     HValue* value,
-                                                     SmallMapList* types,
-                                                     Handle<String> name) {
+void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
+    Assignment* expr,
+    HValue* object,
+    HValue* value,
+    SmallMapList* types,
+    Handle<String> name) {
   // TODO(ager): We should recognize when the prototype chains for different
   // maps are identical. In that case we can avoid repeatedly generating the
   // same prototype map checks.
@@ -5495,7 +5665,7 @@
 }
 
 
-void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
+void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
   Property* prop = expr->target()->AsProperty();
   ASSERT(prop != NULL);
   expr->RecordTypeFeedback(oracle(), zone());
@@ -5578,10 +5748,11 @@
 // Because not every expression has a position and there is no common
 // superclass of Assignment and CountOperation, we cannot just pass the
 // owning expression instead of position and ast_id separately.
-void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
-                                                   HValue* value,
-                                                   int position,
-                                                   BailoutId ast_id) {
+void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
+    Variable* var,
+    HValue* value,
+    int position,
+    BailoutId ast_id) {
   LookupResult lookup(isolate());
   GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
   if (type == kUseCell) {
@@ -5612,7 +5783,7 @@
 }
 
 
-void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
+void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
   Expression* target = expr->target();
   VariableProxy* proxy = target->AsVariableProxy();
   Property* prop = target->AsProperty();
@@ -5739,7 +5910,7 @@
       }
 
       HInstruction* store;
-      if (!monomorphic) {
+      if (!monomorphic || map->is_observed()) {
         // If we don't know the monomorphic type, do a generic store.
         CHECK_ALIVE(store = BuildStoreNamedGeneric(object, name, instr));
       } else {
@@ -5809,7 +5980,7 @@
 }
 
 
-void HGraphBuilder::VisitAssignment(Assignment* expr) {
+void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -5936,7 +6107,7 @@
 }
 
 
-void HGraphBuilder::VisitThrow(Throw* expr) {
+void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -5957,9 +6128,10 @@
 }
 
 
-HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
-                                                    Handle<Map> map,
-                                                    LookupResult* lookup) {
+HLoadNamedField* HOptimizedGraphBuilder::BuildLoadNamedField(
+    HValue* object,
+    Handle<Map> map,
+    LookupResult* lookup) {
   int index = lookup->GetLocalFieldIndexFromMap(*map);
   if (index < 0) {
     // Negative property indices are in-object properties, indexed
@@ -5974,9 +6146,10 @@
 }
 
 
-HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* object,
-                                                   Handle<String> name,
-                                                   Property* expr) {
+HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric(
+    HValue* object,
+    Handle<String> name,
+    Property* expr) {
   if (expr->IsUninitialized() && !FLAG_always_opt) {
     AddInstruction(new(zone()) HSoftDeoptimize);
     current_block()->MarkAsDeoptimizing();
@@ -5986,20 +6159,22 @@
 }
 
 
-HInstruction* HGraphBuilder::BuildCallGetter(HValue* object,
-                                             Handle<Map> map,
-                                             Handle<JSFunction> getter,
-                                             Handle<JSObject> holder) {
+HInstruction* HOptimizedGraphBuilder::BuildCallGetter(
+    HValue* object,
+    Handle<Map> map,
+    Handle<JSFunction> getter,
+    Handle<JSObject> holder) {
   AddCheckConstantFunction(holder, object, map);
   AddInstruction(new(zone()) HPushArgument(object));
   return new(zone()) HCallConstantFunction(getter, 1);
 }
 
 
-HInstruction* HGraphBuilder::BuildLoadNamedMonomorphic(HValue* object,
-                                                       Handle<String> name,
-                                                       Property* expr,
-                                                       Handle<Map> map) {
+HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
+    HValue* object,
+    Handle<String> name,
+    Property* expr,
+    Handle<Map> map) {
   // Handle a load from a known field.
   ASSERT(!map->is_dictionary_map());
   LookupResult lookup(isolate());
@@ -6033,174 +6208,34 @@
 }
 
 
-HInstruction* HGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
-                                                   HValue* key) {
+HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
+                                                            HValue* key) {
   HValue* context = environment()->LookupContext();
   return new(zone()) HLoadKeyedGeneric(context, object, key);
 }
 
 
-HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
-    HValue* external_elements,
-    HValue* checked_key,
+HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
+    HValue* object,
+    HValue* key,
     HValue* val,
     HValue* dependency,
-    ElementsKind elements_kind,
+    Handle<Map> map,
     bool is_store) {
-  if (is_store) {
-    ASSERT(val != NULL);
-    switch (elements_kind) {
-      case EXTERNAL_PIXEL_ELEMENTS: {
-        val = AddInstruction(new(zone()) HClampToUint8(val));
-        break;
-      }
-      case EXTERNAL_BYTE_ELEMENTS:
-      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      case EXTERNAL_SHORT_ELEMENTS:
-      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      case EXTERNAL_INT_ELEMENTS:
-      case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
-        break;
-      }
-      case EXTERNAL_FLOAT_ELEMENTS:
-      case EXTERNAL_DOUBLE_ELEMENTS:
-        break;
-      case FAST_SMI_ELEMENTS:
-      case FAST_ELEMENTS:
-      case FAST_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
-      case FAST_HOLEY_DOUBLE_ELEMENTS:
-      case DICTIONARY_ELEMENTS:
-      case NON_STRICT_ARGUMENTS_ELEMENTS:
-        UNREACHABLE();
-        break;
-    }
-    return new(zone()) HStoreKeyed(external_elements,
-                                   checked_key,
-                                   val,
-                                   elements_kind);
-  } else {
-    ASSERT(val == NULL);
-    HLoadKeyed* load =
-       new(zone()) HLoadKeyed(
-           external_elements, checked_key, dependency, elements_kind);
-    if (FLAG_opt_safe_uint32_operations &&
-        elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
-      graph()->RecordUint32Instruction(load);
-    }
-    return load;
-  }
-}
-
-
-HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
-                                                    HValue* checked_key,
-                                                    HValue* val,
-                                                    HValue* load_dependency,
-                                                    ElementsKind elements_kind,
-                                                    bool is_store) {
-  if (is_store) {
-    ASSERT(val != NULL);
-    switch (elements_kind) {
-      case FAST_SMI_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
-        // Smi-only arrays need a smi check.
-        AddInstruction(new(zone()) HCheckSmi(val));
-        // Fall through.
-      case FAST_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
-      case FAST_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_DOUBLE_ELEMENTS:
-        return new(zone()) HStoreKeyed(
-            elements, checked_key, val, elements_kind);
-      default:
-        UNREACHABLE();
-        return NULL;
-    }
-  }
-  // It's an element load (!is_store).
-  return new(zone()) HLoadKeyed(elements,
-                                checked_key,
-                                load_dependency,
-                                elements_kind);
-}
-
-
-HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
-                                                           HValue* key,
-                                                           HValue* val,
-                                                           HValue* dependency,
-                                                           Handle<Map> map,
-                                                           bool is_store) {
   HCheckMaps* mapcheck = new(zone()) HCheckMaps(object, map,
                                                 zone(), dependency);
   AddInstruction(mapcheck);
   if (dependency) {
     mapcheck->ClearGVNFlag(kDependsOnElementsKind);
   }
-  return BuildUncheckedMonomorphicElementAccess(object, key, val,
-                                                mapcheck, map, is_store);
+  return BuildUncheckedMonomorphicElementAccess(
+      object, key, val,
+      mapcheck, map->instance_type() == JS_ARRAY_TYPE,
+      map->elements_kind(), is_store);
 }
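
The bodies deleted above are hoisted, not dropped: BuildExternalArrayElementAccess, BuildFastElementAccess, and BuildUncheckedMonomorphicElementAccess reappear as members of the new HGraphBuilder base class declared later in this diff (src/hydrogen.h). A minimal sketch of the unpacking the new call shape performs, assuming a map handle is in scope as above:

// Sketch only: the two map-derived facts the shared helper now takes
// explicitly, so that map-less callers (e.g. code stubs) can supply them.
bool is_js_array = map->instance_type() == JS_ARRAY_TYPE;
ElementsKind elements_kind = map->elements_kind();
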
 
 
-HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
-    HValue* object,
-    HValue* key,
-    HValue* val,
-    HCheckMaps* mapcheck,
-    Handle<Map> map,
-    bool is_store) {
-  // No GVNFlag is necessary for ElementsKind if there is an explicit dependency
-  // on a HElementsTransition instruction. The flag can also be removed if the
-  // map to check has FAST_HOLEY_ELEMENTS, since there can be no further
-  // ElementsKind transitions. Finally, the dependency can be removed for stores
-  // for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
-  // generated store code.
-  if ((map->elements_kind() == FAST_HOLEY_ELEMENTS) ||
-      (map->elements_kind() == FAST_ELEMENTS && is_store)) {
-    mapcheck->ClearGVNFlag(kDependsOnElementsKind);
-  }
-  bool fast_smi_only_elements = map->has_fast_smi_elements();
-  bool fast_elements = map->has_fast_object_elements();
-  HInstruction* elements =
-      AddInstruction(new(zone()) HLoadElements(object, mapcheck));
-  if (is_store && (fast_elements || fast_smi_only_elements)) {
-    HCheckMaps* check_cow_map = new(zone()) HCheckMaps(
-        elements, isolate()->factory()->fixed_array_map(), zone());
-    check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
-    AddInstruction(check_cow_map);
-  }
-  HInstruction* length = NULL;
-  HInstruction* checked_key = NULL;
-  if (map->has_external_array_elements()) {
-    length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
-    checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
-                                                          ALLOW_SMI_KEY));
-    HLoadExternalArrayPointer* external_elements =
-        new(zone()) HLoadExternalArrayPointer(elements);
-    AddInstruction(external_elements);
-    return BuildExternalArrayElementAccess(
-        external_elements, checked_key, val, mapcheck,
-        map->elements_kind(), is_store);
-  }
-  ASSERT(fast_smi_only_elements ||
-         fast_elements ||
-         map->has_fast_double_elements());
-  if (map->instance_type() == JS_ARRAY_TYPE) {
-    length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck,
-                                                       HType::Smi()));
-  } else {
-    length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
-  }
-  checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
-                                                        ALLOW_SMI_KEY));
-  return BuildFastElementAccess(elements, checked_key, val, mapcheck,
-                                map->elements_kind(), is_store);
-}
-
-
-HInstruction* HGraphBuilder::TryBuildConsolidatedElementLoad(
+HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
     HValue* object,
     HValue* key,
     HValue* val,
@@ -6248,19 +6283,23 @@
   HCheckMaps* check_maps = new(zone()) HCheckMaps(object, maps, zone());
   AddInstruction(check_maps);
   HInstruction* instr = BuildUncheckedMonomorphicElementAccess(
-      object, key, val, check_maps, most_general_consolidated_map, false);
+      object, key, val, check_maps,
+      most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE,
+      most_general_consolidated_map->elements_kind(),
+      false);
   return instr;
 }
 
 
-HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
-                                                      HValue* key,
-                                                      HValue* val,
-                                                      Expression* prop,
-                                                      BailoutId ast_id,
-                                                      int position,
-                                                      bool is_store,
-                                                      bool* has_side_effects) {
+HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
+    HValue* object,
+    HValue* key,
+    HValue* val,
+    Expression* prop,
+    BailoutId ast_id,
+    int position,
+    bool is_store,
+    bool* has_side_effects) {
   *has_side_effects = false;
   AddInstruction(new(zone()) HCheckNonSmi(object));
   SmallMapList* maps = prop->GetReceiverTypes();
@@ -6445,8 +6484,8 @@
         }
       } else {  // External array elements.
         access = AddInstruction(BuildExternalArrayElementAccess(
-            external_elements, checked_key, val, elements_kind_branch,
-            elements_kind, is_store));
+            external_elements, checked_key, val,
+            elements_kind_branch, elements_kind, is_store));
       }
       *has_side_effects |= access->HasObservableSideEffects();
       if (position != RelocInfo::kNoPosition) access->set_position(position);
@@ -6466,14 +6505,15 @@
 }
 
 
-HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
-                                                HValue* key,
-                                                HValue* val,
-                                                Expression* expr,
-                                                BailoutId ast_id,
-                                                int position,
-                                                bool is_store,
-                                                bool* has_side_effects) {
+HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
+    HValue* obj,
+    HValue* key,
+    HValue* val,
+    Expression* expr,
+    BailoutId ast_id,
+    int position,
+    bool is_store,
+    bool* has_side_effects) {
   ASSERT(!expr->IsPropertyName());
   HInstruction* instr = NULL;
   if (expr->IsMonomorphic()) {
@@ -6503,9 +6543,10 @@
 }
 
 
-HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
-                                                    HValue* key,
-                                                    HValue* value) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreKeyedGeneric(
+    HValue* object,
+    HValue* key,
+    HValue* value) {
   HValue* context = environment()->LookupContext();
   return new(zone()) HStoreKeyedGeneric(
                          context,
@@ -6516,7 +6557,7 @@
 }
 
 
-void HGraphBuilder::EnsureArgumentsArePushedForAccess() {
+void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() {
   // Outermost function already has arguments on the stack.
   if (function_state()->outer() == NULL) return;
 
@@ -6544,7 +6585,7 @@
 }
 
 
-bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
+bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
   VariableProxy* proxy = expr->obj()->AsVariableProxy();
   if (proxy == NULL) return false;
   if (!proxy->var()->IsStackAllocated()) return false;
@@ -6603,7 +6644,7 @@
 }
 
 
-void HGraphBuilder::VisitProperty(Property* expr) {
+void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -6694,8 +6735,8 @@
 }
 
 
-void HGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
-                                          Handle<Map> receiver_map) {
+void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
+                                                   Handle<Map> receiver_map) {
   if (!holder.is_null()) {
     AddInstruction(new(zone()) HCheckPrototypeMaps(
         Handle<JSObject>(JSObject::cast(receiver_map->prototype())), holder));
@@ -6703,9 +6744,10 @@
 }
 
 
-void HGraphBuilder::AddCheckConstantFunction(Handle<JSObject> holder,
-                                             HValue* receiver,
-                                             Handle<Map> receiver_map) {
+void HOptimizedGraphBuilder::AddCheckConstantFunction(
+    Handle<JSObject> holder,
+    HValue* receiver,
+    Handle<Map> receiver_map) {
   // Constant functions have the nice property that the map will change if they
   // are overwritten.  Therefore it is enough to check the map of the holder and
   // its prototypes.
@@ -6747,10 +6789,11 @@
 }
 
 
-void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
-                                               HValue* receiver,
-                                               SmallMapList* types,
-                                               Handle<String> name) {
+void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
+    Call* expr,
+    HValue* receiver,
+    SmallMapList* types,
+    Handle<String> name) {
   // TODO(ager): We should recognize when the prototype chains for different
   // maps are identical. In that case we can avoid repeatedly generating the
   // same prototype map checks.
@@ -6852,9 +6895,9 @@
 }
 
 
-void HGraphBuilder::TraceInline(Handle<JSFunction> target,
-                                Handle<JSFunction> caller,
-                                const char* reason) {
+void HOptimizedGraphBuilder::TraceInline(Handle<JSFunction> target,
+                                         Handle<JSFunction> caller,
+                                         const char* reason) {
   if (FLAG_trace_inlining) {
     SmartArrayPointer<char> target_name =
         target->shared()->DebugName()->ToCString();
@@ -6873,7 +6916,7 @@
 static const int kNotInlinable = 1000000000;
 
 
-int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
+int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
   if (!FLAG_use_inlining) return kNotInlinable;
 
   // Precondition: call is monomorphic and we have found a target with the
@@ -6904,13 +6947,13 @@
 }
 
 
-bool HGraphBuilder::TryInline(CallKind call_kind,
-                              Handle<JSFunction> target,
-                              int arguments_count,
-                              HValue* implicit_return_value,
-                              BailoutId ast_id,
-                              BailoutId return_id,
-                              InliningKind inlining_kind) {
+bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
+                                       Handle<JSFunction> target,
+                                       int arguments_count,
+                                       HValue* implicit_return_value,
+                                       BailoutId ast_id,
+                                       BailoutId return_id,
+                                       InliningKind inlining_kind) {
   int nodes_added = InliningAstSize(target);
   if (nodes_added == kNotInlinable) return false;
 
@@ -7219,7 +7262,7 @@
 }
 
 
-bool HGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
+bool HOptimizedGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
   // The function call we are inlining is a method call if the call
   // is a property call.
   CallKind call_kind = (expr->expression()->AsProperty() == NULL)
@@ -7236,8 +7279,8 @@
 }
 
 
-bool HGraphBuilder::TryInlineConstruct(CallNew* expr,
-                                       HValue* implicit_return_value) {
+bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
+                                                HValue* implicit_return_value) {
   return TryInline(CALL_AS_FUNCTION,
                    expr->target(),
                    expr->arguments()->length(),
@@ -7248,8 +7291,8 @@
 }
 
 
-bool HGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
-                                    Property* prop) {
+bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
+                                             Property* prop) {
   return TryInline(CALL_AS_METHOD,
                    getter,
                    0,
@@ -7260,9 +7303,9 @@
 }
 
 
-bool HGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
-                                    Assignment* assignment,
-                                    HValue* implicit_return_value) {
+bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
+                                             Assignment* assignment,
+                                             HValue* implicit_return_value) {
   return TryInline(CALL_AS_METHOD,
                    setter,
                    1,
@@ -7273,7 +7316,8 @@
 }
 
 
-bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) {
+bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
+                                                          bool drop_extra) {
   if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
   BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
   switch (id) {
@@ -7307,10 +7351,11 @@
 }
 
 
-bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
-                                               HValue* receiver,
-                                               Handle<Map> receiver_map,
-                                               CheckType check_type) {
+bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
+    Call* expr,
+    HValue* receiver,
+    Handle<Map> receiver_map,
+    CheckType check_type) {
   ASSERT(check_type != RECEIVER_MAP_CHECK || !receiver_map.is_null());
   // Try to inline calls like Math.* as operations in the calling function.
   if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
@@ -7440,7 +7485,7 @@
 }
 
 
-bool HGraphBuilder::TryCallApply(Call* expr) {
+bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
   Expression* callee = expr->expression();
   Property* prop = callee->AsProperty();
   ASSERT(prop != NULL);
@@ -7568,7 +7613,7 @@
 }
 
 
-void HGraphBuilder::VisitCall(Call* expr) {
+void HOptimizedGraphBuilder::VisitCall(Call* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -7798,7 +7843,7 @@
 }
 
 
-void HGraphBuilder::VisitCallNew(CallNew* expr) {
+void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -7862,20 +7907,21 @@
 
 // Support for generating inlined runtime functions.
 
-// Lookup table for generators for runtime calls that are  generated inline.
-// Elements of the table are member pointers to functions of HGraphBuilder.
+// Lookup table for generators for runtime calls that are generated inline.
+// Elements of the table are member pointers to functions of
+// HOptimizedGraphBuilder.
 #define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize)  \
-    &HGraphBuilder::Generate##Name,
+    &HOptimizedGraphBuilder::Generate##Name,
 
-const HGraphBuilder::InlineFunctionGenerator
-    HGraphBuilder::kInlineFunctionGenerators[] = {
+const HOptimizedGraphBuilder::InlineFunctionGenerator
+    HOptimizedGraphBuilder::kInlineFunctionGenerators[] = {
         INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
         INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
 };
 #undef INLINE_FUNCTION_GENERATOR_ADDRESS
 
 
-void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -7913,7 +7959,7 @@
 }
 
 
-void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -7929,7 +7975,7 @@
   }
 }
 
-void HGraphBuilder::VisitDelete(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
   Property* prop = expr->expression()->AsProperty();
   VariableProxy* proxy = expr->expression()->AsVariableProxy();
   if (prop != NULL) {
@@ -7964,13 +8010,13 @@
 }
 
 
-void HGraphBuilder::VisitVoid(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitVoid(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForEffect(expr->expression()));
   return ast_context()->ReturnValue(graph()->GetConstantUndefined());
 }
 
 
-void HGraphBuilder::VisitTypeof(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitTypeof(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForTypeOf(expr->expression()));
   HValue* value = Pop();
   HValue* context = environment()->LookupContext();
@@ -7979,22 +8025,22 @@
 }
 
 
-void HGraphBuilder::VisitAdd(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitAdd(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForValue(expr->expression()));
   HValue* value = Pop();
   HValue* context = environment()->LookupContext();
   HInstruction* instr =
-      new(zone()) HMul(context, value, graph_->GetConstant1());
+      new(zone()) HMul(context, value, graph()->GetConstant1());
   return ast_context()->ReturnInstruction(instr, expr->id());
 }
 
 
-void HGraphBuilder::VisitSub(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForValue(expr->expression()));
   HValue* value = Pop();
   HValue* context = environment()->LookupContext();
   HInstruction* instr =
-      new(zone()) HMul(context, value, graph_->GetConstantMinus1());
+      new(zone()) HMul(context, value, graph()->GetConstantMinus1());
   TypeInfo info = oracle()->UnaryType(expr);
   Representation rep = ToRepresentation(info);
   if (info.IsUninitialized()) {
@@ -8007,7 +8053,7 @@
 }
 
 
-void HGraphBuilder::VisitBitNot(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitBitNot(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForValue(expr->expression()));
   HValue* value = Pop();
   TypeInfo info = oracle()->UnaryType(expr);
@@ -8020,7 +8066,7 @@
 }
 
 
-void HGraphBuilder::VisitNot(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitNot(UnaryOperation* expr) {
   if (ast_context()->IsTest()) {
     TestContext* context = TestContext::cast(ast_context());
     VisitForControl(expr->expression(),
@@ -8064,8 +8110,9 @@
 }
 
 
-HInstruction* HGraphBuilder::BuildIncrement(bool returns_original_input,
-                                            CountOperation* expr) {
+HInstruction* HOptimizedGraphBuilder::BuildIncrement(
+    bool returns_original_input,
+    CountOperation* expr) {
   // The input to the count operation is on top of the expression stack.
   TypeInfo info = oracle()->IncrementType(expr);
   Representation rep = ToRepresentation(info);
@@ -8087,8 +8134,8 @@
   // to simulate the expression stack after this instruction.
   // Any later failures deopt to the load of the input or earlier.
   HConstant* delta = (expr->op() == Token::INC)
-      ? graph_->GetConstant1()
-      : graph_->GetConstantMinus1();
+      ? graph()->GetConstant1()
+      : graph()->GetConstantMinus1();
   HValue* context = environment()->LookupContext();
   HInstruction* instr = new(zone()) HAdd(context, Top(), delta);
   // We can't insert a simulate here, because it would break deoptimization,
@@ -8101,7 +8148,7 @@
 }
 
 
-void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
+void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -8185,7 +8232,7 @@
 
     if (prop->key()->IsPropertyName()) {
       // Named property.
-      if (returns_original_input) Push(graph_->GetConstantUndefined());
+      if (returns_original_input) Push(graph()->GetConstantUndefined());
 
       CHECK_ALIVE(VisitForValue(prop->obj()));
       HValue* object = Top();
@@ -8218,7 +8265,7 @@
       input = Pop();
 
       HInstruction* store;
-      if (!monomorphic) {
+      if (!monomorphic || map->is_observed()) {
         // If we don't know the monomorphic type, or the map is observed,
         // do a generic store.
         CHECK_ALIVE(store = BuildStoreNamedGeneric(object, name, after));
       } else {
@@ -8246,7 +8293,7 @@
 
     } else {
       // Keyed property.
-      if (returns_original_input) Push(graph_->GetConstantUndefined());
+      if (returns_original_input) Push(graph()->GetConstantUndefined());
 
       CHECK_ALIVE(VisitForValue(prop->obj()));
       CHECK_ALIVE(VisitForValue(prop->key()));
@@ -8286,9 +8333,10 @@
 }
 
 
-HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* context,
-                                                        HValue* string,
-                                                        HValue* index) {
+HStringCharCodeAt* HOptimizedGraphBuilder::BuildStringCharCodeAt(
+    HValue* context,
+    HValue* string,
+    HValue* index) {
   AddInstruction(new(zone()) HCheckNonSmi(string));
   AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
   HStringLength* length = new(zone()) HStringLength(string);
@@ -8316,10 +8364,10 @@
 // directions that can be replaced by one rotate right instruction or not.
 // Returns the operand and the shift amount for the rotate instruction in the
 // former case.
-bool HGraphBuilder::MatchRotateRight(HValue* left,
-                                     HValue* right,
-                                     HValue** operand,
-                                     HValue** shift_amount) {
+bool HOptimizedGraphBuilder::MatchRotateRight(HValue* left,
+                                              HValue* right,
+                                              HValue** operand,
+                                              HValue** shift_amount) {
   HShl* shl;
   HShr* shr;
   if (left->IsShl() && right->IsShr()) {
@@ -8354,9 +8402,10 @@
 }
 
 
-HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
-                                                  HValue* left,
-                                                  HValue* right) {
+HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
+    BinaryOperation* expr,
+    HValue* left,
+    HValue* right) {
   HValue* context = environment()->LookupContext();
   TypeInfo left_info, right_info, result_info, combined_info;
   oracle()->BinaryType(expr, &left_info, &right_info, &result_info);
@@ -8449,7 +8498,7 @@
 }
 
 
-void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -8465,7 +8514,7 @@
 }
 
 
-void HGraphBuilder::VisitComma(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitComma(BinaryOperation* expr) {
   CHECK_ALIVE(VisitForEffect(expr->left()));
   // Visit the right subexpression in the same AST context as the entire
   // expression.
@@ -8473,7 +8522,7 @@
 }
 
 
-void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
   bool is_logical_and = expr->op() == Token::AND;
   if (ast_context()->IsTest()) {
     TestContext* context = TestContext::cast(ast_context());
@@ -8563,7 +8612,7 @@
 }
 
 
-void HGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
   CHECK_ALIVE(VisitForValue(expr->left()));
   CHECK_ALIVE(VisitForValue(expr->right()));
   HValue* right = Pop();
@@ -8574,7 +8623,7 @@
 }
 
 
-Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
+Representation HOptimizedGraphBuilder::ToRepresentation(TypeInfo info) {
   if (info.IsUninitialized()) return Representation::None();
   if (info.IsSmi()) return Representation::Integer32();
   if (info.IsInteger32()) return Representation::Integer32();
@@ -8584,9 +8633,9 @@
 }
 
 
-void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
-                                               HTypeof* typeof_expr,
-                                               Handle<String> check) {
+void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
+                                                        HTypeof* typeof_expr,
+                                                        Handle<String> check) {
   // Note: The HTypeof itself is removed during canonicalization, if possible.
   HValue* value = typeof_expr->value();
   HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check);
@@ -8656,7 +8705,7 @@
 }
 
 
-void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -8809,9 +8858,9 @@
 }
 
 
-void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
-                                            HValue* value,
-                                            NilValue nil) {
+void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
+                                                     HValue* value,
+                                                     NilValue nil) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -8823,7 +8872,7 @@
 }
 
 
-HInstruction* HGraphBuilder::BuildThisFunction() {
+HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
   // If we share optimized code between different closures, the
   // this-function is not a constant, except inside an inlined body.
   if (function_state()->outer() != NULL) {
@@ -8836,7 +8885,7 @@
 }
 
 
-void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
+void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
@@ -8845,7 +8894,8 @@
 }
 
 
-void HGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
+void HOptimizedGraphBuilder::VisitDeclarations(
+    ZoneList<Declaration*>* declarations) {
   ASSERT(globals_.is_empty());
   AstVisitor::VisitDeclarations(declarations);
   if (!globals_.is_empty()) {
@@ -8863,7 +8913,8 @@
 }
 
 
-void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitVariableDeclaration(
+    VariableDeclaration* declaration) {
   VariableProxy* proxy = declaration->proxy();
   VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
@@ -8900,7 +8951,8 @@
 }
 
 
-void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitFunctionDeclaration(
+    FunctionDeclaration* declaration) {
   VariableProxy* proxy = declaration->proxy();
   Variable* variable = proxy->var();
   switch (variable->location()) {
@@ -8938,49 +8990,52 @@
 }
 
 
-void HGraphBuilder::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitModuleDeclaration(
+    ModuleDeclaration* declaration) {
   UNREACHABLE();
 }
 
 
-void HGraphBuilder::VisitImportDeclaration(ImportDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitImportDeclaration(
+    ImportDeclaration* declaration) {
   UNREACHABLE();
 }
 
 
-void HGraphBuilder::VisitExportDeclaration(ExportDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitExportDeclaration(
+    ExportDeclaration* declaration) {
   UNREACHABLE();
 }
 
 
-void HGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) {
+void HOptimizedGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) {
   UNREACHABLE();
 }
 
 
-void HGraphBuilder::VisitModuleVariable(ModuleVariable* module) {
+void HOptimizedGraphBuilder::VisitModuleVariable(ModuleVariable* module) {
   UNREACHABLE();
 }
 
 
-void HGraphBuilder::VisitModulePath(ModulePath* module) {
+void HOptimizedGraphBuilder::VisitModulePath(ModulePath* module) {
   UNREACHABLE();
 }
 
 
-void HGraphBuilder::VisitModuleUrl(ModuleUrl* module) {
+void HOptimizedGraphBuilder::VisitModuleUrl(ModuleUrl* module) {
   UNREACHABLE();
 }
 
 
-void HGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
+void HOptimizedGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
   UNREACHABLE();
 }
 
 
 // Generators for inline runtime functions.
 // Support for types.
-void HGraphBuilder::GenerateIsSmi(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -8989,7 +9044,7 @@
 }
 
 
-void HGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9001,7 +9056,7 @@
 }
 
 
-void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9011,7 +9066,7 @@
 }
 
 
-void HGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9021,7 +9076,7 @@
 }
 
 
-void HGraphBuilder::GenerateIsArray(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9031,7 +9086,7 @@
 }
 
 
-void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9041,7 +9096,7 @@
 }
 
 
-void HGraphBuilder::GenerateIsObject(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsObject(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9050,12 +9105,12 @@
 }
 
 
-void HGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
   return Bailout("inlined runtime function: IsNonNegativeSmi");
 }
 
 
-void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9065,7 +9120,7 @@
 }
 
 
-void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
+void HOptimizedGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
     CallRuntime* call) {
   return Bailout(
       "inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
@@ -9073,7 +9128,7 @@
 
 
 // Support for construct call checks.
-void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 0);
   if (function_state()->outer() != NULL) {
     // We are generating graph for inlined function.
@@ -9089,7 +9144,7 @@
 
 
 // Support for arguments.length and arguments[?].
-void HGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
   // Our implementation of arguments (based on this stack frame or an
   // adapter below it) does not work for inlined functions.  This runtime
   // function is blacklisted by AstNode::IsInlineable.
@@ -9102,7 +9157,7 @@
 }
 
 
-void HGraphBuilder::GenerateArguments(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) {
   // Our implementation of arguments (based on this stack frame or an
   // adapter below it) does not work for inlined functions.  This runtime
   // function is blacklisted by AstNode::IsInlineable.
@@ -9122,14 +9177,14 @@
 
 
 // Support for accessing the class and value fields of an object.
-void HGraphBuilder::GenerateClassOf(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateClassOf(CallRuntime* call) {
  // The special form detected by IsClassOfTest is handled before we get here
   // and does not cause a bailout.
   return Bailout("inlined runtime function: ClassOf");
 }
 
 
-void HGraphBuilder::GenerateValueOf(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9138,7 +9193,7 @@
 }
 
 
-void HGraphBuilder::GenerateDateField(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
   ASSERT_NE(NULL, call->arguments()->at(1)->AsLiteral());
   Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->handle()));
@@ -9149,7 +9204,7 @@
 }
 
 
-void HGraphBuilder::GenerateOneByteSeqStringSetChar(
+void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
     CallRuntime* call) {
   ASSERT(call->arguments()->length() == 3);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -9164,7 +9219,7 @@
 }
 
 
-void HGraphBuilder::GenerateTwoByteSeqStringSetChar(
+void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
     CallRuntime* call) {
   ASSERT(call->arguments()->length() == 3);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -9182,7 +9237,7 @@
 }
 
 
-void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9225,7 +9280,7 @@
 
 
 // Fast support for charCodeAt(n).
-void HGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9238,7 +9293,7 @@
 
 
 // Fast support for string.charAt(n) and string[n].
-void HGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* char_code = Pop();
@@ -9250,7 +9305,7 @@
 
 
 // Fast support for string.charAt(n) and string[n].
-void HGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9266,7 +9321,7 @@
 
 
 // Fast support for object equality testing.
-void HGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9278,14 +9333,14 @@
 }
 
 
-void HGraphBuilder::GenerateLog(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateLog(CallRuntime* call) {
   // %_Log is ignored in optimized code.
   return ast_context()->ReturnValue(graph()->GetConstantUndefined());
 }
 
 
 // Fast support for Math.random().
-void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
   HValue* context = environment()->LookupContext();
   HGlobalObject* global_object = new(zone()) HGlobalObject(context);
   AddInstruction(global_object);
@@ -9295,7 +9350,7 @@
 
 
 // Fast support for StringAdd.
-void HGraphBuilder::GenerateStringAdd(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
   ASSERT_EQ(2, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9306,7 +9361,7 @@
 
 
 // Fast support for SubString.
-void HGraphBuilder::GenerateSubString(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
   ASSERT_EQ(3, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9317,7 +9372,7 @@
 
 
 // Fast support for StringCompare.
-void HGraphBuilder::GenerateStringCompare(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
   ASSERT_EQ(2, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9329,7 +9384,7 @@
 
 
 // Support for direct calls from JavaScript to native RegExp code.
-void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
   ASSERT_EQ(4, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9340,7 +9395,7 @@
 
 
 // Construct a RegExp exec result with two in-object properties.
-void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
   ASSERT_EQ(3, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9352,13 +9407,13 @@
 
 
 // Support for fast native caches.
-void HGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
   return Bailout("inlined runtime function: GetFromCache");
 }
 
 
 // Fast support for number to string.
-void HGraphBuilder::GenerateNumberToString(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9370,7 +9425,7 @@
 
 
 // Fast call for custom callbacks.
-void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
   // 1 ~ The function to call is not itself an argument to the call.
   int arg_count = call->arguments()->length() - 1;
   ASSERT(arg_count >= 1);  // There's always at least a receiver.
@@ -9414,7 +9469,7 @@
 
 
 // Fast call to math functions.
-void HGraphBuilder::GenerateMathPow(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
   ASSERT_EQ(2, call->arguments()->length());
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9425,7 +9480,7 @@
 }
 
 
-void HGraphBuilder::GenerateMathSin(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathSin(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9437,7 +9492,7 @@
 }
 
 
-void HGraphBuilder::GenerateMathCos(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathCos(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9449,7 +9504,7 @@
 }
 
 
-void HGraphBuilder::GenerateMathTan(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathTan(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9461,7 +9516,7 @@
 }
 
 
-void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathLog(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
   HValue* context = environment()->LookupContext();
@@ -9473,18 +9528,18 @@
 }
 
 
-void HGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
   return Bailout("inlined runtime function: MathSqrt");
 }
 
 
 // Check whether two RegExps are equivalent.
-void HGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
   return Bailout("inlined runtime function: IsRegExpEquivalent");
 }
 
 
-void HGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
@@ -9493,7 +9548,7 @@
 }
 
 
-void HGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
   return Bailout("inlined runtime function: FastAsciiArrayJoin");
 }
 
@@ -9523,6 +9578,23 @@
 }
 
 
+HEnvironment::HEnvironment(Zone* zone)
+    : values_(0, zone),
+      assigned_variables_(0, zone),
+      frame_type_(STUB),
+      parameter_count_(0),
+      specials_count_(0),
+      local_count_(0),
+      outer_(NULL),
+      entry_(NULL),
+      pop_count_(0),
+      push_count_(0),
+      ast_id_(BailoutId::None()),
+      zone_(zone) {
+  Initialize(0, 0, 0);
+}
+
+
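
A hedged usage sketch for the new zero-argument constructor: it models a STUB frame with no receiver, parameters, or locals, which the graph-builder split below needs when compiling code stubs rather than JS functions. The placement-new idiom is the one used throughout this file:

// Sketch: an empty environment for a stub frame.
HEnvironment* env = new(zone) HEnvironment(zone);
ASSERT(env->frame_type() == STUB);    // see the FrameType change below
ASSERT(env->parameter_count() == 0);  // Initialize(0, 0, 0)
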
 HEnvironment::HEnvironment(const HEnvironment* other, Zone* zone)
     : values_(0, zone),
       assigned_variables_(0, zone),
@@ -9790,11 +9862,17 @@
 }
 
 
-void HTracer::TraceCompilation(FunctionLiteral* function) {
+void HTracer::TraceCompilation(CompilationInfo* info) {
   Tag tag(this, "compilation");
-  Handle<String> name = function->debug_name();
-  PrintStringProperty("name", *name->ToCString());
-  PrintStringProperty("method", *name->ToCString());
+  if (info->IsOptimizing()) {
+    Handle<String> name = info->function()->debug_name();
+    PrintStringProperty("name", *name->ToCString());
+    PrintStringProperty("method", *name->ToCString());
+  } else {
+    CodeStub::Major major_key = info->code_stub()->MajorKey();
+    PrintStringProperty("name", CodeStub::MajorName(major_key, false));
+    PrintStringProperty("method", "stub");
+  }
   PrintLongProperty("date", static_cast<int64_t>(OS::TimeCurrentMillis()));
 }
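
With CompilationInfo as the argument, one tracer entry point now serves both pipelines. A sketch of the intended call, assuming a tracer instance and the usual flag guard:

// Sketch: the same call covers optimized functions and code stubs. For a
// stub, the trace gets name = CodeStub::MajorName(...) and method = "stub"
// instead of the function's debug name.
if (FLAG_trace_hydrogen) tracer->TraceCompilation(info);
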
 
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 98b05d1..0837bf9 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -429,7 +429,8 @@
   JS_CONSTRUCT,
   JS_GETTER,
   JS_SETTER,
-  ARGUMENTS_ADAPTOR
+  ARGUMENTS_ADAPTOR,
+  STUB
 };
 
 
@@ -440,6 +441,8 @@
                Handle<JSFunction> closure,
                Zone* zone);
 
+  explicit HEnvironment(Zone* zone);
+
   HEnvironment* arguments_environment() {
     return outer()->frame_type() == ARGUMENTS_ADAPTOR ? outer() : this;
   }
@@ -636,7 +639,7 @@
 };
 
 
-class HGraphBuilder;
+class HOptimizedGraphBuilder;
 
 enum ArgumentsAllowedFlag {
   ARGUMENTS_NOT_ALLOWED,
@@ -672,10 +675,10 @@
   bool is_for_typeof() { return for_typeof_; }
 
  protected:
-  AstContext(HGraphBuilder* owner, Expression::Context kind);
+  AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind);
   virtual ~AstContext();
 
-  HGraphBuilder* owner() const { return owner_; }
+  HOptimizedGraphBuilder* owner() const { return owner_; }
 
   inline Zone* zone() const;
 
@@ -686,7 +689,7 @@
 #endif
 
  private:
-  HGraphBuilder* owner_;
+  HOptimizedGraphBuilder* owner_;
   Expression::Context kind_;
   AstContext* outer_;
   bool for_typeof_;
@@ -695,7 +698,7 @@
 
 class EffectContext: public AstContext {
  public:
-  explicit EffectContext(HGraphBuilder* owner)
+  explicit EffectContext(HOptimizedGraphBuilder* owner)
       : AstContext(owner, Expression::kEffect) {
   }
   virtual ~EffectContext();
@@ -708,7 +711,7 @@
 
 class ValueContext: public AstContext {
  public:
-  explicit ValueContext(HGraphBuilder* owner, ArgumentsAllowedFlag flag)
+  ValueContext(HOptimizedGraphBuilder* owner, ArgumentsAllowedFlag flag)
       : AstContext(owner, Expression::kValue), flag_(flag) {
   }
   virtual ~ValueContext();
@@ -726,7 +729,7 @@
 
 class TestContext: public AstContext {
  public:
-  TestContext(HGraphBuilder* owner,
+  TestContext(HOptimizedGraphBuilder* owner,
               Expression* condition,
               TypeFeedbackOracle* oracle,
               HBasicBlock* if_true,
@@ -766,7 +769,7 @@
 
 class FunctionState {
  public:
-  FunctionState(HGraphBuilder* owner,
+  FunctionState(HOptimizedGraphBuilder* owner,
                 CompilationInfo* info,
                 TypeFeedbackOracle* oracle,
                 InliningKind inlining_kind);
@@ -796,7 +799,7 @@
   bool arguments_pushed() { return arguments_elements() != NULL; }
 
  private:
-  HGraphBuilder* owner_;
+  HOptimizedGraphBuilder* owner_;
 
   CompilationInfo* compilation_info_;
   TypeFeedbackOracle* oracle_;
@@ -828,7 +831,65 @@
 };
 
 
-class HGraphBuilder: public AstVisitor {
+class HGraphBuilder {
+ public:
+  explicit HGraphBuilder(CompilationInfo* info)
+      : info_(info), graph_(NULL), current_block_(NULL) {}
+  virtual ~HGraphBuilder() {}
+
+  HBasicBlock* current_block() const { return current_block_; }
+  void set_current_block(HBasicBlock* block) { current_block_ = block; }
+  HEnvironment* environment() const {
+    return current_block()->last_environment();
+  }
+  Zone* zone() const { return info_->zone(); }
+  HGraph* graph() { return graph_; }
+
+  HGraph* CreateGraph();
+
+  // Adding instructions.
+  HInstruction* AddInstruction(HInstruction* instr);
+  void AddSimulate(BailoutId id,
+                   RemovableSimulate removable = FIXED_SIMULATE);
+
+ protected:
+  virtual bool BuildGraph() = 0;
+
+  // Building common constructs
+  HInstruction* BuildExternalArrayElementAccess(
+      HValue* external_elements,
+      HValue* checked_key,
+      HValue* val,
+      HValue* dependency,
+      ElementsKind elements_kind,
+      bool is_store);
+
+  HInstruction* BuildFastElementAccess(
+      HValue* elements,
+      HValue* checked_key,
+      HValue* val,
+      HValue* dependency,
+      ElementsKind elements_kind,
+      bool is_store);
+
+  HInstruction* BuildUncheckedMonomorphicElementAccess(
+      HValue* object,
+      HValue* key,
+      HValue* val,
+      HCheckMaps* mapcheck,
+      bool is_js_array,
+      ElementsKind elements_kind,
+      bool is_store);
+
+ private:
+  HGraphBuilder();
+  CompilationInfo* info_;
+  HGraph* graph_;
+  HBasicBlock* current_block_;
+};
+
+
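
This split is the structural heart of the change: graph plumbing and the shared element-access builders move into the small HGraphBuilder base above, and the AST-driven builder becomes a subclass. A hypothetical stub builder sketched against the new interface; StubParameter is made up here, since the real stub plumbing is not part of this diff:

// Hypothetical subclass: a code stub supplies only BuildGraph() and can
// reuse BuildUncheckedMonomorphicElementAccess without a Handle<Map>.
class KeyedLoadStubSketch : public HGraphBuilder {
 public:
  explicit KeyedLoadStubSketch(CompilationInfo* info)
      : HGraphBuilder(info) {}

 protected:
  virtual bool BuildGraph() {
    HValue* receiver = StubParameter(0);  // made-up parameter accessor
    HValue* key = StubParameter(1);       // made-up parameter accessor
    AddInstruction(BuildUncheckedMonomorphicElementAccess(
        receiver, key,
        NULL,           // val: a load carries no value
        NULL,           // mapcheck: assumed already done by the caller
        true,           // is_js_array
        FAST_ELEMENTS,  // elements_kind
        false));        // is_store
    return true;
  }

 private:
  HValue* StubParameter(int index);  // hypothetical, declaration only
};
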
+class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
  public:
   enum BreakType { BREAK, CONTINUE };
   enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH };
@@ -864,7 +925,8 @@
   // structures mirroring BreakableStatement nesting.
   class BreakAndContinueScope BASE_EMBEDDED {
    public:
-    BreakAndContinueScope(BreakAndContinueInfo* info, HGraphBuilder* owner)
+    BreakAndContinueScope(BreakAndContinueInfo* info,
+                          HOptimizedGraphBuilder* owner)
         : info_(info), owner_(owner), next_(owner->break_scope()) {
       owner->set_break_scope(this);
     }
@@ -872,7 +934,7 @@
     ~BreakAndContinueScope() { owner_->set_break_scope(next_); }
 
     BreakAndContinueInfo* info() { return info_; }
-    HGraphBuilder* owner() { return owner_; }
+    HOptimizedGraphBuilder* owner() { return owner_; }
     BreakAndContinueScope* next() { return next_; }
 
     // Search the break stack for a break or continue target.
@@ -880,32 +942,20 @@
 
    private:
     BreakAndContinueInfo* info_;
-    HGraphBuilder* owner_;
+    HOptimizedGraphBuilder* owner_;
     BreakAndContinueScope* next_;
   };
 
-  HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
+  HOptimizedGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
 
-  HGraph* CreateGraph();
+  virtual bool BuildGraph();
 
   // Simple accessors.
-  HGraph* graph() const { return graph_; }
   BreakAndContinueScope* break_scope() const { return break_scope_; }
   void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; }
 
-  HBasicBlock* current_block() const { return current_block_; }
-  void set_current_block(HBasicBlock* block) { current_block_ = block; }
-  HEnvironment* environment() const {
-    return current_block()->last_environment();
-  }
-
   bool inline_bailout() { return inline_bailout_; }
 
-  // Adding instructions.
-  HInstruction* AddInstruction(HInstruction* instr);
-  void AddSimulate(BailoutId ast_id,
-                   RemovableSimulate removable = FIXED_SIMULATE);
-
   // Bailout environment manipulation.
   void Push(HValue* value) { environment()->Push(value); }
   HValue* Pop() { return environment()->Pop(); }
@@ -928,9 +978,12 @@
   void operator delete(void* pointer, Zone* zone) { }
   void operator delete(void* pointer) { }
 
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
  private:
   // Type of a member function that generates inline code for a native function.
-  typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
+  typedef void (HOptimizedGraphBuilder::*InlineFunctionGenerator)
+      (CallRuntime* call);
 
   // Forward declarations for inner scope classes.
   class SubgraphScope;
@@ -1139,25 +1192,14 @@
                                      HValue* right);
   HInstruction* BuildIncrement(bool returns_original_input,
                                CountOperation* expr);
-  HInstruction* BuildFastElementAccess(HValue* elements,
-                                       HValue* checked_key,
-                                       HValue* val,
-                                       HValue* dependency,
-                                       ElementsKind elements_kind,
-                                       bool is_store);
+  HInstruction* BuildLoadKeyedGeneric(HValue* object,
+                                      HValue* key);
 
   HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
                                                 HValue* key,
                                                 HValue* val,
                                                 SmallMapList* maps);
 
-  HInstruction* BuildUncheckedMonomorphicElementAccess(HValue* object,
-                                                       HValue* key,
-                                                       HValue* val,
-                                                       HCheckMaps* mapcheck,
-                                                       Handle<Map> map,
-                                                       bool is_store);
-
   HInstruction* BuildMonomorphicElementAccess(HValue* object,
                                               HValue* key,
                                               HValue* val,
@@ -1197,14 +1239,6 @@
                                           Handle<String> name,
                                           Property* expr,
                                           Handle<Map> map);
-  HInstruction* BuildLoadKeyedGeneric(HValue* object, HValue* key);
-  HInstruction* BuildExternalArrayElementAccess(
-      HValue* external_elements,
-      HValue* checked_key,
-      HValue* val,
-      HValue* dependency,
-      ElementsKind elements_kind,
-      bool is_store);
 
   void AddCheckMapsWithTransitions(HValue* object,
                                    Handle<Map> map);
@@ -1246,8 +1280,6 @@
                         HValue** operand,
                         HValue** shift_amount);
 
-  Zone* zone() const { return zone_; }
-
   // The translation state of the currently-being-translated function.
   FunctionState* function_state_;
 
@@ -1261,20 +1293,16 @@
   // A stack of breakable statements entered.
   BreakAndContinueScope* break_scope_;
 
-  HGraph* graph_;
-  HBasicBlock* current_block_;
-
   int inlined_count_;
   ZoneList<Handle<Object> > globals_;
 
-  Zone* zone_;
-
   bool inline_bailout_;
 
   friend class FunctionState;  // Pushes and pops the state stack.
   friend class AstContext;  // Pushes and pops the AST context stack.
+  friend class KeyedLoadFastElementStub;
 
-  DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
+  DISALLOW_COPY_AND_ASSIGN(HOptimizedGraphBuilder);
 };
 
 
@@ -1447,7 +1475,7 @@
 
 class HTracer: public Malloced {
  public:
-  void TraceCompilation(FunctionLiteral* function);
+  void TraceCompilation(CompilationInfo* info);
   void TraceHydrogen(const char* name, HGraph* graph);
   void TraceLithium(const char* name, LChunk* chunk);
   void TraceLiveRanges(const char* name, LAllocator* allocator);
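
The hydrogen.h hunks above split the old monolithic builder: graph-wide state (graph_, current_block_, zone_) moves into a base class, and the AST-driven HOptimizedGraphBuilder becomes one subclass, so that compiled stubs can share the graph-building machinery. A minimal, compilable sketch of that shape (hypothetical names, not the real V8 classes):

    #include <cstdio>

    struct BasicBlock { int id; };

    // Base class: owns graph-level state shared by every graph builder.
    class GraphBuilderBase {
     public:
      GraphBuilderBase() : current_block_(0) {}
      virtual ~GraphBuilderBase() {}
      virtual bool BuildGraph() = 0;  // Each builder flavor fills in the graph.
      BasicBlock* current_block() const { return current_block_; }
     protected:
      void set_current_block(BasicBlock* block) { current_block_ = block; }
     private:
      BasicBlock* current_block_;
    };

    // Subclass: the AST-walking builder used for optimized function code.
    class OptimizedGraphBuilder : public GraphBuilderBase {
     public:
      virtual bool BuildGraph() {
        static BasicBlock entry = { 0 };
        set_current_block(&entry);
        return true;
      }
    };

    int main() {
      OptimizedGraphBuilder builder;
      bool built = builder.BuildGraph();
      std::printf("built: %d, entry block id: %d\n",
                  built ? 1 : 0, builder.current_block()->id);
      return 0;
    }
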
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 8cccaa5..f82defc 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -55,6 +55,33 @@
 uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
 
 
+int IntelDoubleRegister::NumAllocatableRegisters() {
+  if (CpuFeatures::IsSupported(SSE2)) {
+    return XMMRegister::kNumAllocatableRegisters;
+  } else {
+    return X87TopOfStackRegister::kNumAllocatableRegisters;
+  }
+}
+
+
+int IntelDoubleRegister::NumRegisters() {
+  if (CpuFeatures::IsSupported(SSE2)) {
+    return XMMRegister::kNumRegisters;
+  } else {
+    return X87TopOfStackRegister::kNumRegisters;
+  }
+}
+
+
+const char* IntelDoubleRegister::AllocationIndexToString(int index) {
+  if (CpuFeatures::IsSupported(SSE2)) {
+    return XMMRegister::AllocationIndexToString(index);
+  } else {
+    return X87TopOfStackRegister::AllocationIndexToString(index);
+  }
+}
+
+
 // The Probe method needs executable memory, so it uses Heap::CreateCode.
 // Allocation failure is silent and leads to safe default.
 void CpuFeatures::Probe() {
@@ -2199,7 +2226,8 @@
   EnsureSpace ensure_space(this);
   EMIT(0x0F);
   EMIT(0x18);
-  XMMRegister code = { level };  // Emit hint number in Reg position of RegR/M.
+  // Emit the hint number in the Reg position of the ModR/M byte.
+  XMMRegister code = XMMRegister::from_code(level);
   emit_sse_operand(code, src);
 }
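
The prefetch hunk above replaces the brace initializer `XMMRegister code = { level };` with `XMMRegister::from_code(level)`. A plausible reason, given the assembler-ia32.h hunks below: once XMMRegister gains the IntelDoubleRegister base class it is no longer an aggregate (before C++17), so brace initialization stops compiling and a factory function is needed. A small compilable illustration of the rule (hypothetical types):

    #include <cstdio>

    struct Plain { int code_; };   // Aggregate: brace initialization works.

    struct Base { int code_; };
    struct Derived : Base {        // Has a base class: not an aggregate before
      static Derived from_code(int code) {  // C++17, so a factory is needed.
        Derived result;
        result.code_ = code;
        return result;
      }
    };

    int main() {
      Plain p = { 1 };                      // OK: aggregate initialization.
      // Derived d = { 1 };                 // Ill-formed before C++17.
      Derived d = Derived::from_code(1);    // The pattern used in the diff.
      std::printf("%d %d\n", p.code_, d.code_);
      return 0;
    }
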
 
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index b1f421e..232a85e 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -65,7 +65,10 @@
 // and best performance in optimized code.
 //
 struct Register {
-  static const int kNumAllocatableRegisters = 6;
+  static const int kMaxNumAllocatableRegisters = 6;
+  static int NumAllocatableRegisters() {
+    return kMaxNumAllocatableRegisters;
+  }
   static const int kNumRegisters = 8;
 
   static inline const char* AllocationIndexToString(int index);
@@ -119,7 +122,7 @@
 
 
 inline const char* Register::AllocationIndexToString(int index) {
-  ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+  ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
   // This is the mapping of allocation indices to registers.
   const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
   return kNames[index];
@@ -133,22 +136,69 @@
 
 
 inline Register Register::FromAllocationIndex(int index)  {
-  ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+  ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
   return (index >= 4) ? from_code(index + 2) : from_code(index);
 }
 
 
-struct XMMRegister {
-  static const int kNumAllocatableRegisters = 7;
-  static const int kNumRegisters = 8;
+struct IntelDoubleRegister {
+  static const int kMaxNumAllocatableRegisters = 7;
+  static int NumAllocatableRegisters();
+  static int NumRegisters();
+  static const char* AllocationIndexToString(int index);
 
-  static int ToAllocationIndex(XMMRegister reg) {
+  static int ToAllocationIndex(IntelDoubleRegister reg) {
     ASSERT(reg.code() != 0);
     return reg.code() - 1;
   }
 
+  static IntelDoubleRegister FromAllocationIndex(int index) {
+    ASSERT(index >= 0 && index < NumAllocatableRegisters());
+    return from_code(index + 1);
+  }
+
+  static IntelDoubleRegister from_code(int code) {
+    IntelDoubleRegister result = { code };
+    return result;
+  }
+
+  bool is_valid() const {
+    return 0 <= code_ && code_ < NumRegisters();
+  }
+  int code() const {
+    ASSERT(is_valid());
+    return code_;
+  }
+
+  int code_;
+};
+
+
+const IntelDoubleRegister double_register_0 = { 0 };
+const IntelDoubleRegister double_register_1 = { 1 };
+const IntelDoubleRegister double_register_2 = { 2 };
+const IntelDoubleRegister double_register_3 = { 3 };
+const IntelDoubleRegister double_register_4 = { 4 };
+const IntelDoubleRegister double_register_5 = { 5 };
+const IntelDoubleRegister double_register_6 = { 6 };
+const IntelDoubleRegister double_register_7 = { 7 };
+
+
+struct XMMRegister : IntelDoubleRegister {
+  static const int kNumAllocatableRegisters = 7;
+  static const int kNumRegisters = 8;
+
+  static XMMRegister from_code(int code) {
+    STATIC_ASSERT(sizeof(XMMRegister) == sizeof(IntelDoubleRegister));
+    XMMRegister result;
+    result.code_ = code;
+    return result;
+  }
+
+  bool is(XMMRegister reg) const { return code_ == reg.code_; }
+
   static XMMRegister FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < NumAllocatableRegisters());
     return from_code(index + 1);
   }
 
@@ -165,34 +215,46 @@
     };
     return names[index];
   }
-
-  static XMMRegister from_code(int code) {
-    XMMRegister r = { code };
-    return r;
-  }
-
-  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
-  bool is(XMMRegister reg) const { return code_ == reg.code_; }
-  int code() const {
-    ASSERT(is_valid());
-    return code_;
-  }
-
-  int code_;
 };
 
 
-const XMMRegister xmm0 = { 0 };
-const XMMRegister xmm1 = { 1 };
-const XMMRegister xmm2 = { 2 };
-const XMMRegister xmm3 = { 3 };
-const XMMRegister xmm4 = { 4 };
-const XMMRegister xmm5 = { 5 };
-const XMMRegister xmm6 = { 6 };
-const XMMRegister xmm7 = { 7 };
+#define xmm0 (static_cast<const XMMRegister&>(double_register_0))
+#define xmm1 (static_cast<const XMMRegister&>(double_register_1))
+#define xmm2 (static_cast<const XMMRegister&>(double_register_2))
+#define xmm3 (static_cast<const XMMRegister&>(double_register_3))
+#define xmm4 (static_cast<const XMMRegister&>(double_register_4))
+#define xmm5 (static_cast<const XMMRegister&>(double_register_5))
+#define xmm6 (static_cast<const XMMRegister&>(double_register_6))
+#define xmm7 (static_cast<const XMMRegister&>(double_register_7))
 
 
-typedef XMMRegister DoubleRegister;
+struct X87TopOfStackRegister : IntelDoubleRegister {
+  static const int kNumAllocatableRegisters = 1;
+  static const int kNumRegisters = 1;
+
+  bool is(X87TopOfStackRegister reg) const {
+    return code_ == reg.code_;
+  }
+
+  static const char* AllocationIndexToString(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    const char* const names[] = {
+      "st0",
+    };
+    return names[index];
+  }
+
+  static int ToAllocationIndex(X87TopOfStackRegister reg) {
+    ASSERT(reg.code() == 0);
+    return 0;
+  }
+};
+
+#define x87tos \
+  static_cast<const X87TopOfStackRegister&>(double_register_0)
+
+
+typedef IntelDoubleRegister DoubleRegister;
 
 
 enum Condition {
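
The new IntelDoubleRegister hierarchy lets register-allocation queries dispatch at runtime on SSE2 support instead of baking in a compile-time constant (compare NumAllocatableRegisters() in the assembler-ia32.cc hunk above). A minimal sketch of the pattern, with a stubbed feature probe rather than the real CpuFeatures:

    #include <cstdio>

    // Stand-in for CpuFeatures::IsSupported(SSE2); the real probe uses CPUID.
    static bool SupportsSSE2() { return true; }

    struct DoubleRegisterInfo {
      // Runtime query replaces the old kNumAllocatableRegisters constant.
      static int NumAllocatableRegisters() {
        return SupportsSSE2() ? 7   // xmm1..xmm7 (xmm0 stays a scratch reg).
                              : 1;  // x87: only the top of the FP stack.
      }
    };

    int main() {
      // Allocation loops now ask the runtime query, as in the
      // GetRegThatIsNotEcxOr change further down in this diff.
      for (int i = 0; i < DoubleRegisterInfo::NumAllocatableRegisters(); i++) {
        std::printf("allocatable double register index %d\n", i);
      }
      return 0;
    }
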
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 01785bb..cadff49 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -574,6 +574,25 @@
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
 
 
+void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) {
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across the notification; this is important for
+    // compiled stubs that tail-call the runtime on deopts, passing their
+    // parameters in registers.
+    __ pushad();
+    __ CallRuntime(Runtime::kNotifyICMiss, 0);
+    __ popad();
+    // Tear down internal frame.
+  }
+
+  __ pop(MemOperand(esp, 0));  // Ignore the state offset.
+  __ ret(0);  // Return to IC Miss stub, continuation still on stack.
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index da8e2ae..6114da9 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -40,6 +40,18 @@
 namespace v8 {
 namespace internal {
 
+
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { edx, ecx };
+  descriptor->register_param_count_ = 2;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      isolate->builtins()->KeyedLoadIC_Miss();
+}
+
+
 #define __ ACCESS_MASM(masm)
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
@@ -1745,12 +1757,8 @@
           }
           // Check result type if it is currently Int32.
           if (result_type_ <= BinaryOpIC::INT32) {
-            __ cvttsd2si(ecx, Operand(xmm0));
-            __ cvtsi2sd(xmm2, ecx);
-            __ pcmpeqd(xmm2, xmm0);
-            __ movmskpd(ecx, xmm2);
-            __ test(ecx, Immediate(1));
-            __ j(zero, &not_int32);
+            FloatingPointHelper::CheckSSE2OperandIsInt32(
+                masm, &not_int32, xmm0, ecx, xmm2);
           }
           BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@@ -2426,6 +2434,7 @@
 
     __ bind(&loaded);
   } else {  // UNTAGGED.
+    CpuFeatures::Scope scope(SSE2);
     if (CpuFeatures::IsSupported(SSE4_1)) {
       CpuFeatures::Scope sse4_scope(SSE4_1);
       __ pextrd(edx, xmm1, 0x1);  // copy xmm1[63..32] to edx.
@@ -2498,6 +2507,7 @@
     __ fstp(0);
     __ ret(kPointerSize);
   } else {  // UNTAGGED.
+    CpuFeatures::Scope scope(SSE2);
     __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
     __ Ret();
   }
@@ -2510,6 +2520,7 @@
   if (tagged) {
     __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
   } else {  // UNTAGGED.
+    CpuFeatures::Scope scope(SSE2);
     __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
     __ sub(esp, Immediate(kDoubleSize));
     __ movdbl(Operand(esp, 0), xmm1);
@@ -2524,6 +2535,7 @@
   if (tagged) {
     __ ret(kPointerSize);
   } else {  // UNTAGGED.
+    CpuFeatures::Scope scope(SSE2);
     __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
     __ Ret();
 
@@ -2556,6 +2568,7 @@
         ExternalReference(RuntimeFunction(), masm->isolate());
     __ TailCallExternalReference(runtime, 1, 1);
   } else {  // UNTAGGED.
+    CpuFeatures::Scope scope(SSE2);
     __ bind(&runtime_call_clear_stack);
     __ bind(&runtime_call);
     __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
@@ -4808,10 +4821,17 @@
 
 
 void CodeStub::GenerateFPStubs() {
-  CEntryStub save_doubles(1, kSaveFPRegs);
-  Handle<Code> code = save_doubles.GetCode();
-  code->set_is_pregenerated(true);
-  code->GetIsolate()->set_fp_stubs_generated(true);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CEntryStub save_doubles(1, kSaveFPRegs);
+    // The stub might already be in the snapshot; detect that case and don't
+    // regenerate it, since regenerating would corrupt the code stub
+    // initialization state.
+    Code* save_doubles_code;
+    if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
+      save_doubles_code = *(save_doubles.GetCode());
+    }
+    save_doubles_code->set_is_pregenerated(true);
+    save_doubles_code->GetIsolate()->set_fp_stubs_generated(true);
+  }
 }
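
GenerateFPStubs above now checks the stub cache before generating, because a stub baked into the snapshot must not be regenerated. A compilable sketch of the check-then-create pattern (a std::map stands in for the real stub cache):

    #include <cstdio>
    #include <map>
    #include <string>

    static std::map<int, std::string> stub_cache;  // Stand-in for the cache.

    // Mirrors the FindCodeInCache / GetCode dance in the hunk above: reuse a
    // cached stub if present, generate (and cache) it only on a miss.
    static const std::string& GetOrGenerateStub(int major_key) {
      std::map<int, std::string>::iterator it = stub_cache.find(major_key);
      if (it == stub_cache.end()) {
        it = stub_cache.insert(
            std::make_pair(major_key, std::string("generated code"))).first;
      }
      return it->second;
    }

    int main() {
      GetOrGenerateStub(1);                          // Generates.
      const std::string& s = GetOrGenerateStub(1);   // Cache hit: no regen.
      std::printf("%s (%u stub cached)\n", s.c_str(),
                  static_cast<unsigned>(stub_cache.size()));
      return 0;
    }
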
 
 
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 29c16e1..4f8c81f 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -38,7 +38,7 @@
 
 // Compute a transcendental math function natively, or call the
 // TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
  public:
   enum ArgumentType {
     TAGGED = 0,
@@ -61,7 +61,7 @@
 };
 
 
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
  public:
   explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
       : save_doubles_(save_fp) { }
@@ -80,7 +80,7 @@
 };
 
 
-class UnaryOpStub: public CodeStub {
+class UnaryOpStub: public PlatformCodeStub {
  public:
   UnaryOpStub(Token::Value op,
               UnaryOverwriteMode mode,
@@ -225,7 +225,7 @@
 };
 
 
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
  public:
   explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
 
@@ -247,7 +247,7 @@
 };
 
 
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
  public:
   SubStringStub() {}
 
@@ -259,7 +259,7 @@
 };
 
 
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
  public:
   StringCompareStub() { }
 
@@ -295,7 +295,7 @@
 };
 
 
-class NumberToStringStub: public CodeStub {
+class NumberToStringStub: public PlatformCodeStub {
  public:
   NumberToStringStub() { }
 
@@ -320,7 +320,7 @@
 };
 
 
-class StringDictionaryLookupStub: public CodeStub {
+class StringDictionaryLookupStub: public PlatformCodeStub {
  public:
   enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
 
@@ -382,7 +382,7 @@
 };
 
 
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
  public:
   RecordWriteStub(Register object,
                   Register value,
@@ -585,7 +585,7 @@
     Register GetRegThatIsNotEcxOr(Register r1,
                                   Register r2,
                                   Register r3) {
-      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+      for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
         Register candidate = Register::FromAllocationIndex(i);
         if (candidate.is(ecx)) continue;
         if (candidate.is(r1)) continue;
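
The KeyedLoadFastElementStub descriptor registered in code-stubs-ia32.cc above is looked up again by major key in the deoptimizer (see DoCompiledStubFrame below), which reads back deoptimization_handler_ to rebuild the miss-handler call. A simplified, compilable sketch of that descriptor registry (hypothetical names and key values):

    #include <cassert>
    #include <cstdio>

    struct CodeStubInterfaceDescriptor {
      int register_param_count_;
      const int* register_params_;          // Allocation indices of params.
      const char* deoptimization_handler_;  // Stand-in for the miss builtin.
    };

    static CodeStubInterfaceDescriptor descriptors[16];  // Keyed by major key.

    static void InitializeKeyedLoadDescriptor(int major_key) {
      static const int registers[] = { 2, 1 };  // e.g. edx, ecx on ia32.
      descriptors[major_key].register_param_count_ = 2;
      descriptors[major_key].register_params_ = registers;
      descriptors[major_key].deoptimization_handler_ = "KeyedLoadIC_Miss";
    }

    int main() {
      const int kKeyedLoadMajorKey = 3;  // Hypothetical key, not V8's value.
      InitializeKeyedLoadDescriptor(kKeyedLoadMajorKey);
      // The deoptimizer side: look the descriptor up again by major key.
      const CodeStubInterfaceDescriptor& d = descriptors[kKeyedLoadMajorKey];
      assert(d.register_param_count_ == 2);
      std::printf("miss handler: %s\n", d.deoptimization_handler_);
      return 0;
    }
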
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index c9ecacc..3233c27 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -114,17 +114,19 @@
 }
 
 
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
-  if (!function->IsOptimized()) return;
+void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
+    JSFunction* function) {
+  Isolate* isolate = function->GetIsolate();
+  HandleScope scope(isolate);
+  AssertNoAllocation no_allocation;
+
+  ASSERT(function->IsOptimized());
+  ASSERT(function->FunctionsInFunctionListShareSameCode());
 
   // The optimized code is going to be patched, so we cannot use it
   // any more.  Play safe and reset the whole cache.
   function->shared()->ClearOptimizedCodeMap();
 
-  Isolate* isolate = function->GetIsolate();
-  HandleScope scope(isolate);
-  AssertNoAllocation no_allocation;
-
   // Get the optimized code.
   Code* code = function->code();
   Address code_start_address = code->instruction_start();
@@ -295,7 +297,7 @@
 
 void Deoptimizer::DoComputeOsrOutputFrame() {
   DeoptimizationInputData* data = DeoptimizationInputData::cast(
-      optimized_code_->deoptimization_data());
+      compiled_code_->deoptimization_data());
   unsigned ast_id = data->OsrAstId()->value();
   // TODO(kasperl): This should not be the bailout_id_. It should be
   // the ast id. Confusing.
@@ -332,7 +334,7 @@
   unsigned input_frame_size = input_->GetFrameSize();
   ASSERT(fixed_size + height_in_bytes == input_frame_size);
 
-  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+  unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
   unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
   unsigned outgoing_size = outgoing_height * kPointerSize;
   unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@@ -443,7 +445,7 @@
 
     unsigned pc_offset = data->OsrPcOffset()->value();
     uint32_t pc = reinterpret_cast<uint32_t>(
-        optimized_code_->entry() + pc_offset);
+        compiled_code_->entry() + pc_offset);
     output_[0]->SetPc(pc);
   }
   Code* continuation =
@@ -557,6 +559,70 @@
 }
 
 
+void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
+                                      int frame_index) {
+  //
+  //               FROM                                  TO             <-ebp
+  //    |          ....           |          |          ....           |
+  //    +-------------------------+          +-------------------------+
+  //    | JSFunction continuation |          | JSFunction continuation |
+  //    +-------------------------+          +-------------------------+<-esp
+  // |  |   saved frame (ebp)     |
+  // |  +=========================+<-ebp
+  // |  |   JSFunction context    |
+  // v  +-------------------------+
+  //    |   COMPILED_STUB marker  |          ebp = saved frame
+  //    +-------------------------+          esi = JSFunction context
+  //    |                         |
+  //    | ...                     |
+  //    |                         |
+  //    +-------------------------+<-esp
+  //
+  //
+  int output_frame_size = 1 * kPointerSize;
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, 0);
+  Code* notify_miss =
+      isolate_->builtins()->builtin(Builtins::kNotifyICMiss);
+  output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+  output_frame->SetContinuation(
+      reinterpret_cast<uint32_t>(notify_miss->entry()));
+
+  ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
+  int major_key = compiled_code_->major_key();
+  CodeStubInterfaceDescriptor* descriptor =
+      isolate_->code_stub_interface_descriptor(major_key);
+  Handle<Code> miss_ic(descriptor->deoptimization_handler_);
+  output_frame->SetPc(reinterpret_cast<intptr_t>(miss_ic->instruction_start()));
+  unsigned input_frame_size = input_->GetFrameSize();
+  intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
+  output_frame->SetFrameSlot(0, value);
+  value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
+  output_frame->SetRegister(ebp.code(), value);
+  output_frame->SetFp(value);
+  value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
+  output_frame->SetRegister(esi.code(), value);
+
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  ASSERT(opcode == Translation::REGISTER);
+  USE(opcode);
+  int input_reg = iterator->Next();
+  intptr_t input_value = input_->GetRegister(input_reg);
+  output_frame->SetRegister(edx.code(), input_value);
+
+  int32_t next = iterator->Next();
+  opcode = static_cast<Translation::Opcode>(next);
+  ASSERT(opcode == Translation::REGISTER);
+  input_reg = iterator->Next();
+  input_value = input_->GetRegister(input_reg);
+  output_frame->SetRegister(ecx.code(), input_value);
+
+  ASSERT(frame_index == 0);
+  output_[frame_index] = output_frame;
+}
+
+
 void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
                                               int frame_index) {
   Builtins* builtins = isolate_->builtins();
@@ -985,7 +1051,7 @@
   }
   input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
   input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
     input_->SetDoubleRegister(i, 0.0);
   }
 
@@ -1000,7 +1066,6 @@
 
 void Deoptimizer::EntryGenerator::Generate() {
   GeneratePrologue();
-  CpuFeatures::Scope scope(SSE2);
 
   Isolate* isolate = masm()->isolate();
 
@@ -1010,10 +1075,13 @@
   const int kDoubleRegsSize = kDoubleSize *
                               XMMRegister::kNumAllocatableRegisters;
   __ sub(esp, Immediate(kDoubleRegsSize));
-  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
-    XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
-    int offset = i * kDoubleSize;
-    __ movdbl(Operand(esp, offset), xmm_reg);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope scope(SSE2);
+    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+      XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+      int offset = i * kDoubleSize;
+      __ movdbl(Operand(esp, offset), xmm_reg);
+    }
   }
 
   __ pushad();
@@ -1061,14 +1129,18 @@
     __ pop(Operand(ebx, offset));
   }
 
-  // Fill in the double input registers.
   int double_regs_offset = FrameDescription::double_registers_offset();
-  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
-    int dst_offset = i * kDoubleSize + double_regs_offset;
-    int src_offset = i * kDoubleSize;
-    __ movdbl(xmm0, Operand(esp, src_offset));
-    __ movdbl(Operand(ebx, dst_offset), xmm0);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope scope(SSE2);
+    // Fill in the double input registers.
+    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+      int dst_offset = i * kDoubleSize + double_regs_offset;
+      int src_offset = i * kDoubleSize;
+      __ movdbl(xmm0, Operand(esp, src_offset));
+      __ movdbl(Operand(ebx, dst_offset), xmm0);
+    }
   }
+  __ fninit();
 
   // Remove the bailout id and the double registers from the stack.
   if (type() == EAGER) {
@@ -1086,10 +1158,13 @@
   // limit and copy the contents of the activation frame to the input
   // frame description.
   __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
+  Label pop_loop_header;
+  __ jmp(&pop_loop_header);
   Label pop_loop;
   __ bind(&pop_loop);
   __ pop(Operand(edx, 0));
   __ add(edx, Immediate(sizeof(uint32_t)));
+  __ bind(&pop_loop_header);
   __ cmp(ecx, esp);
   __ j(not_equal, &pop_loop);
 
@@ -1127,31 +1202,39 @@
   }
 
   // Replace the current frame with the output frames.
-  Label outer_push_loop, inner_push_loop;
+  Label outer_push_loop, inner_push_loop,
+      outer_loop_header, inner_loop_header;
   // Outer loop state: eax = current FrameDescription**, edx = one past the
   // last FrameDescription**.
   __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
   __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
   __ lea(edx, Operand(eax, edx, times_4, 0));
+  __ jmp(&outer_loop_header);
   __ bind(&outer_push_loop);
   // Inner loop state: ebx = current FrameDescription*, ecx = loop index.
   __ mov(ebx, Operand(eax, 0));
   __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
+  __ jmp(&inner_loop_header);
   __ bind(&inner_push_loop);
   __ sub(ecx, Immediate(sizeof(uint32_t)));
   __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
+  __ bind(&inner_loop_header);
   __ test(ecx, ecx);
   __ j(not_zero, &inner_push_loop);
   __ add(eax, Immediate(kPointerSize));
+  __ bind(&outer_loop_header);
   __ cmp(eax, edx);
   __ j(below, &outer_push_loop);
 
   // In case of OSR, we have to restore the XMM registers.
   if (type() == OSR) {
-    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
-      XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
-      int src_offset = i * kDoubleSize + double_regs_offset;
-      __ movdbl(xmm_reg, Operand(ebx, src_offset));
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope scope(SSE2);
+      for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+        XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+        int src_offset = i * kDoubleSize + double_regs_offset;
+        __ movdbl(xmm_reg, Operand(ebx, src_offset));
+      }
     }
   }
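
DoCompiledStubFrame above consumes the translation stream in a fixed shape: an opcode that must be REGISTER followed by the register index, once per stub parameter. A small compilable sketch of that iterator protocol (simplified stream, not the real Translation encoding):

    #include <cassert>
    #include <cstdio>

    enum Opcode { REGISTER = 7 };  // Hypothetical value, not V8's encoding.

    class TranslationIterator {
     public:
      TranslationIterator(const int* data, int length)
          : data_(data), length_(length), pos_(0) {}
      int Next() {
        assert(pos_ < length_);
        return data_[pos_++];
      }
     private:
      const int* data_;
      int length_;
      int pos_;
    };

    int main() {
      // Two REGISTER entries, as emitted for the two stub parameters.
      const int stream[] = { REGISTER, 2, REGISTER, 1 };
      TranslationIterator it(stream, 4);
      for (int param = 0; param < 2; param++) {
        Opcode opcode = static_cast<Opcode>(it.Next());
        assert(opcode == REGISTER);
        int input_reg = it.Next();
        std::printf("parameter %d comes from input register %d\n",
                    param, input_reg);
      }
      return 0;
    }
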
 
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 50713b5..95e95d4 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -1977,7 +1977,7 @@
 
 
 void FullCodeGenerator::EmitAssignment(Expression* expr) {
-  // Invalid left-hand sides are rewritten to have a 'throw
+  // Invalid left-hand sides are rewritten by the parser to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
     VisitForEffect(expr);
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index e03f733..bc3eed5 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -30,6 +30,7 @@
 #if defined(V8_TARGET_ARCH_IA32)
 
 #include "ia32/lithium-codegen-ia32.h"
+#include "ic.h"
 #include "code-stubs.h"
 #include "deoptimizer.h"
 #include "stub-cache.h"
@@ -70,7 +71,6 @@
   HPhase phase("Z_Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;
-  CpuFeatures::Scope scope(SSE2);
 
   CodeStub::GenerateFPStubs();
 
@@ -79,13 +79,15 @@
   // the frame (that is done in GeneratePrologue).
   FrameScope frame_scope(masm_, StackFrame::MANUAL);
 
-  dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 &&
-                              !chunk()->graph()->is_recursive()) ||
-                             !info()->osr_ast_id().IsNone();
+  dynamic_frame_alignment_ = info()->IsOptimizing() &&
+      ((chunk()->num_double_slots() > 2 &&
+        !chunk()->graph()->is_recursive()) ||
+       !info()->osr_ast_id().IsNone());
 
   return GeneratePrologue() &&
       GenerateBody() &&
       GenerateDeferredCode() &&
+      GenerateJumpTable() &&
       GenerateSafepointTable();
 }
 
@@ -95,7 +97,9 @@
   code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
-  Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
+  if (!info()->IsStub()) {
+    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
+  }
 }
 
 
@@ -126,113 +130,126 @@
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
 
-  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+  if (info()->IsOptimizing()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
 #ifdef DEBUG
-  if (strlen(FLAG_stop_at) > 0 &&
-      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
-    __ int3();
-  }
+    if (strlen(FLAG_stop_at) > 0 &&
+        info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+      __ int3();
+    }
 #endif
 
-  // Strict mode functions and builtins need to replace the receiver
-  // with undefined when called as functions (without an explicit
-  // receiver object). ecx is zero for method calls and non-zero for
-  // function calls.
-  if (!info_->is_classic_mode() || info_->is_native()) {
-    Label ok;
-    __ test(ecx, Operand(ecx));
-    __ j(zero, &ok, Label::kNear);
-    // +1 for return address.
-    int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
-    __ mov(Operand(esp, receiver_offset),
-           Immediate(isolate()->factory()->undefined_value()));
-    __ bind(&ok);
-  }
+    // Strict mode functions and builtins need to replace the receiver
+    // with undefined when called as functions (without an explicit
+    // receiver object). ecx is zero for method calls and non-zero for
+    // function calls.
+    if (!info_->is_classic_mode() || info_->is_native()) {
+      Label ok;
+      __ test(ecx, Operand(ecx));
+      __ j(zero, &ok, Label::kNear);
+      // +1 for return address.
+      int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+      __ mov(Operand(esp, receiver_offset),
+             Immediate(isolate()->factory()->undefined_value()));
+      __ bind(&ok);
+    }
 
+    if (dynamic_frame_alignment_) {
+      // Move state of dynamic frame alignment into edx.
+      __ mov(edx, Immediate(kNoAlignmentPadding));
 
-  if (dynamic_frame_alignment_) {
-    // Move state of dynamic frame alignment into edx.
-    __ mov(edx, Immediate(kNoAlignmentPadding));
+      Label do_not_pad, align_loop;
+      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+      // Align esp + 4 to a multiple of 2 * kPointerSize.
+      __ test(esp, Immediate(kPointerSize));
+      __ j(not_zero, &do_not_pad, Label::kNear);
+      __ push(Immediate(0));
+      __ mov(ebx, esp);
+      __ mov(edx, Immediate(kAlignmentPaddingPushed));
+      // Copy arguments, receiver, and return address.
+      __ mov(ecx, Immediate(scope()->num_parameters() + 2));
 
-    Label do_not_pad, align_loop;
-    STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
-    // Align esp + 4 to a multiple of 2 * kPointerSize.
-    __ test(esp, Immediate(kPointerSize));
-    __ j(not_zero, &do_not_pad, Label::kNear);
-    __ push(Immediate(0));
-    __ mov(ebx, esp);
-    __ mov(edx, Immediate(kAlignmentPaddingPushed));
-    // Copy arguments, receiver, and return address.
-    __ mov(ecx, Immediate(scope()->num_parameters() + 2));
-
-    __ bind(&align_loop);
-    __ mov(eax, Operand(ebx, 1 * kPointerSize));
-    __ mov(Operand(ebx, 0), eax);
-    __ add(Operand(ebx), Immediate(kPointerSize));
-    __ dec(ecx);
-    __ j(not_zero, &align_loop, Label::kNear);
-    __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
-    __ bind(&do_not_pad);
+      __ bind(&align_loop);
+      __ mov(eax, Operand(ebx, 1 * kPointerSize));
+      __ mov(Operand(ebx, 0), eax);
+      __ add(Operand(ebx), Immediate(kPointerSize));
+      __ dec(ecx);
+      __ j(not_zero, &align_loop, Label::kNear);
+      __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+      __ bind(&do_not_pad);
+    }
   }
 
   info()->set_prologue_offset(masm_->pc_offset());
-  __ push(ebp);  // Caller's frame pointer.
-  __ mov(ebp, esp);
-  __ push(esi);  // Callee's context.
-  __ push(edi);  // Callee's JS function.
+  if (NeedsEagerFrame()) {
+    ASSERT(!frame_is_built_);
+    frame_is_built_ = true;
+    __ push(ebp);  // Caller's frame pointer.
+    __ mov(ebp, esp);
+    __ push(esi);  // Callee's context.
+    if (info()->IsStub()) {
+      __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+    } else {
+      __ push(edi);  // Callee's JS function.
+    }
+  }
 
-  if (dynamic_frame_alignment_ && FLAG_debug_code) {
+  if (info()->IsOptimizing() &&
+      dynamic_frame_alignment_ &&
+      FLAG_debug_code) {
     __ test(esp, Immediate(kPointerSize));
     __ Assert(zero, "frame is expected to be aligned");
   }
 
   // Reserve space for the stack slots needed by the code.
   int slots = GetStackSlotCount();
-  ASSERT_GE(slots, 1);
-  if (slots == 1) {
-    if (dynamic_frame_alignment_) {
-      __ push(edx);
-    } else {
-      __ push(Immediate(kNoAlignmentPadding));
-    }
-  } else {
-    if (FLAG_debug_code) {
-      __ mov(Operand(eax), Immediate(slots));
-      Label loop;
-      __ bind(&loop);
-      __ push(Immediate(kSlotsZapValue));
-      __ dec(eax);
-      __ j(not_zero, &loop);
-    } else {
-      __ sub(Operand(esp), Immediate(slots * kPointerSize));
-  #ifdef _MSC_VER
-      // On windows, you may not access the stack more than one page below
-      // the most recently mapped page. To make the allocated area randomly
-      // accessible, we write to each page in turn (the value is irrelevant).
-      const int kPageSize = 4 * KB;
-      for (int offset = slots * kPointerSize - kPageSize;
-           offset > 0;
-           offset -= kPageSize) {
-        __ mov(Operand(esp, offset), eax);
+  ASSERT(slots != 0 || !info()->IsOptimizing());
+  if (slots > 0) {
+    if (slots == 1) {
+      if (dynamic_frame_alignment_) {
+        __ push(edx);
+      } else {
+        __ push(Immediate(kNoAlignmentPadding));
       }
-  #endif
-    }
-
-    // Store dynamic frame alignment state in the first local.
-    if (dynamic_frame_alignment_) {
-      __ mov(Operand(ebp,
-                     JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
-             edx);
     } else {
-      __ mov(Operand(ebp,
-                     JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
-             Immediate(kNoAlignmentPadding));
+      if (FLAG_debug_code) {
+        __ mov(Operand(eax), Immediate(slots));
+        Label loop;
+        __ bind(&loop);
+        __ push(Immediate(kSlotsZapValue));
+        __ dec(eax);
+        __ j(not_zero, &loop);
+      } else {
+        __ sub(Operand(esp), Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+        // On Windows, you may not access the stack more than one page below
+        // the most recently mapped page. To make the allocated area randomly
+        // accessible, we write to each page in turn (the value is irrelevant).
+        const int kPageSize = 4 * KB;
+        for (int offset = slots * kPointerSize - kPageSize;
+             offset > 0;
+             offset -= kPageSize) {
+          __ mov(Operand(esp, offset), eax);
+        }
+#endif
+      }
+
+      // Store dynamic frame alignment state in the first local.
+      if (dynamic_frame_alignment_) {
+        __ mov(Operand(ebp,
+                       JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
+               edx);
+      } else {
+        __ mov(Operand(ebp,
+                       JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
+               Immediate(kNoAlignmentPadding));
+      }
     }
   }
 
   // Possibly allocate a local context.
-  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment(";;; Allocate local context");
     // Argument to NewContext is the function, which is still in edi.
@@ -272,7 +289,7 @@
   }
 
   // Trace the call.
-  if (FLAG_trace) {
+  if (FLAG_trace && info()->IsOptimizing()) {
     // We have not executed any compiled code yet, so esi still holds the
     // incoming context.
     __ CallRuntime(Runtime::kTraceEnter, 0);
@@ -326,16 +343,102 @@
 }
 
 
+bool LCodeGen::GenerateJumpTable() {
+  Label needs_frame_not_call;
+  Label needs_frame_is_call;
+  for (int i = 0; i < jump_table_.length(); i++) {
+    __ bind(&jump_table_[i].label);
+    Address entry = jump_table_[i].address;
+    if (jump_table_[i].needs_frame) {
+      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
+      if (jump_table_[i].is_lazy_deopt) {
+        if (needs_frame_is_call.is_bound()) {
+          __ jmp(&needs_frame_is_call);
+        } else {
+          __ bind(&needs_frame_is_call);
+          __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
+          // This variant of deopt can only be used with stubs. Since we don't
+          // have a function pointer to install in the stack frame that we're
+          // building, install a special marker there instead.
+          ASSERT(info()->IsStub());
+          __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+          // Push a PC inside the function so that the deopt code can find where
+          // the deopt comes from. It doesn't have to be the precise return
+          // address of a "calling" LAZY deopt, it only has to be somewhere
+          // inside the code body.
+          Label push_approx_pc;
+          __ call(&push_approx_pc);
+          __ bind(&push_approx_pc);
+          // Push the continuation, which was stashed where the ebp should
+          // be. Replace it with the saved ebp.
+          __ push(MemOperand(esp, 3 * kPointerSize));
+          __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
+          __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+          __ ret(0);  // Call the continuation without clobbering registers.
+        }
+      } else {
+        if (needs_frame_not_call.is_bound()) {
+          __ jmp(&needs_frame_not_call);
+        } else {
+          __ bind(&needs_frame_not_call);
+          __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
+          // This variant of deopt can only be used with stubs. Since we don't
+          // have a function pointer to install in the stack frame that we're
+          // building, install a special marker there instead.
+          ASSERT(info()->IsStub());
+          __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+          // Push the continuation, which was stashed where the ebp should
+          // be. Replace it with the saved ebp.
+          __ push(MemOperand(esp, 2 * kPointerSize));
+          __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
+          __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
+          __ ret(0);  // Call the continuation without clobbering registers.
+        }
+      }
+    } else {
+      if (jump_table_[i].is_lazy_deopt) {
+        __ call(entry, RelocInfo::RUNTIME_ENTRY);
+      } else {
+        __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+      }
+    }
+  }
+  return !is_aborted();
+}
+
+
 bool LCodeGen::GenerateDeferredCode() {
   ASSERT(is_generating());
   if (deferred_.length() > 0) {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Deferred build frame",
+                code->instruction_index(),
+                code->instr()->Mnemonic());
+        ASSERT(!frame_is_built_);
+        ASSERT(info()->IsStub());
+        frame_is_built_ = true;
+        // Build the frame in such a way that esi isn't trashed.
+        __ push(ebp);  // Caller's frame pointer.
+        __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
+        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+        __ lea(ebp, Operand(esp, 2 * kPointerSize));
+      }
       Comment(";;; Deferred code @%d: %s.",
               code->instruction_index(),
               code->instr()->Mnemonic());
       code->Generate();
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Deferred destroy frame",
+                code->instruction_index(),
+                code->instr()->Mnemonic());
+        ASSERT(frame_is_built_);
+        frame_is_built_ = false;
+        __ mov(esp, ebp);
+        __ pop(ebp);
+      }
       __ jmp(code->exit());
     }
   }
@@ -349,6 +452,15 @@
 
 bool LCodeGen::GenerateSafepointTable() {
   ASSERT(is_done());
+  if (!info()->IsStub()) {
+    // For lazy deoptimization we need space to patch a call after every call.
+    // Ensure there is always space for such patching, even if the code ends
+    // in a call.
+    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
+    while (masm()->pc_offset() < target_offset) {
+      masm()->nop();
+    }
+  }
   safepoints_.Emit(masm(), GetStackSlotCount());
   return !is_aborted();
 }
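
The padding loop above keeps Deoptimizer::patch_size() bytes of nops after the final instruction so a lazy-deopt call can always be patched in, even when the code body ends in a call. A compilable simulation of that padding (a byte vector stands in for the assembler buffer):

    #include <cstdio>
    #include <vector>

    // Pad the instruction stream so there are always patch_size bytes after
    // the last real instruction, mirroring GenerateSafepointTable above.
    static void EnsurePatchSpace(std::vector<unsigned char>* code,
                                 int patch_size) {
      const unsigned char kNop = 0x90;  // x86 one-byte nop.
      size_t target_offset = code->size() + patch_size;
      while (code->size() < target_offset) code->push_back(kNop);
    }

    int main() {
      std::vector<unsigned char> code(10, 0xCC);  // Pretend code body.
      EnsurePatchSpace(&code, 5);
      std::printf("code size after padding: %u\n",
                  static_cast<unsigned>(code.size()));
      return 0;
    }
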
@@ -364,6 +476,11 @@
 }
 
 
+bool LCodeGen::IsX87TopOfStack(LOperand* op) const {
+  return op->IsDoubleRegister();
+}
+
+
 Register LCodeGen::ToRegister(LOperand* op) const {
   ASSERT(op->IsRegister());
   return ToRegister(op->index());
@@ -449,7 +566,9 @@
                    translation,
                    arguments_index,
                    arguments_count);
-  int closure_id = *info()->closure() != *environment->closure()
+  bool has_closure_id = !info()->closure().is_null() &&
+      *info()->closure() != *environment->closure();
+  int closure_id = has_closure_id
       ? DefineDeoptimizationLiteral(environment->closure())
       : Translation::kSelfLiteralId;
   switch (environment->frame_type()) {
@@ -472,6 +591,11 @@
     case ARGUMENTS_ADAPTOR:
       translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
       break;
+    case STUB:
+      translation->BeginCompiledStubFrame();
+      break;
+    default:
+      UNREACHABLE();
   }
 
   // Inlined frames which push their arguments cause the index to be
@@ -606,6 +730,8 @@
   __ CallRuntime(fun, argc);
 
   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+
+  ASSERT(info()->is_calling());
 }
 
 
@@ -630,6 +756,8 @@
   __ CallRuntimeSaveDoubles(id);
   RecordSafepointWithRegisters(
       instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+
+  ASSERT(info()->is_calling());
 }
 
 
@@ -675,7 +803,11 @@
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
-  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+  ASSERT(info()->IsOptimizing() || info()->IsStub());
+  Deoptimizer::BailoutType bailout_type = frame_is_built_
+      ? Deoptimizer::EAGER
+      : Deoptimizer::LAZY;
+  Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
   if (entry == NULL) {
     Abort("bailout was not prepared");
     return;
@@ -709,19 +841,44 @@
     __ popfd();
   }
 
+  ASSERT(info()->IsStub() || frame_is_built_);
+  bool lazy_deopt_needed = info()->IsStub();
   if (cc == no_condition) {
     if (FLAG_trap_on_deopt) __ int3();
-    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+    if (lazy_deopt_needed) {
+      __ call(entry, RelocInfo::RUNTIME_ENTRY);
+    } else {
+      __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+    }
   } else {
+    Label done;
     if (FLAG_trap_on_deopt) {
-      Label done;
       __ j(NegateCondition(cc), &done, Label::kNear);
       __ int3();
-      __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
-      __ bind(&done);
-    } else {
-      __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
     }
+    if (!lazy_deopt_needed && frame_is_built_) {
+      if (FLAG_trap_on_deopt) {
+        __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+      } else {
+        __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
+      }
+    } else {
+      // We often have several deopts to the same entry; reuse the last
+      // jump entry if this is the case.
+      if (jump_table_.is_empty() ||
+          jump_table_.last().address != entry ||
+          jump_table_.last().needs_frame != !frame_is_built_ ||
+          jump_table_.last().is_lazy_deopt != lazy_deopt_needed) {
+        JumpTableEntry table_entry(entry, !frame_is_built_, lazy_deopt_needed);
+        jump_table_.Add(table_entry, zone());
+      }
+      if (FLAG_trap_on_deopt) {
+        __ jmp(&jump_table_.last().label);
+      } else {
+        __ j(cc, &jump_table_.last().label);
+      }
+    }
+    __ bind(&done);
   }
 }
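
The jump-table reuse test above (address, needs_frame, and is_lazy_deopt all matching the last entry) is run-length deduplication of consecutive identical deopt targets. A compilable sketch of the same check:

    #include <cstdio>
    #include <vector>

    struct JumpTableEntry {
      const void* address;
      bool needs_frame;
      bool is_lazy_deopt;
    };

    // Append only when the new entry differs from the last one, so a run of
    // deopts to the same target shares a single jump-table slot.
    static void AddDeopt(std::vector<JumpTableEntry>* table,
                         const void* address, bool needs_frame, bool lazy) {
      if (table->empty() ||
          table->back().address != address ||
          table->back().needs_frame != needs_frame ||
          table->back().is_lazy_deopt != lazy) {
        JumpTableEntry entry = { address, needs_frame, lazy };
        table->push_back(entry);
      }
    }

    int main() {
      std::vector<JumpTableEntry> table;
      int target;  // Dummy object standing in for a deopt entry address.
      AddDeopt(&table, &target, true, true);
      AddDeopt(&table, &target, true, true);   // Deduplicated.
      AddDeopt(&table, &target, false, true);  // Different shape: new slot.
      std::printf("jump table entries: %u\n",
                  static_cast<unsigned>(table.size()));
      return 0;
    }
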
 
@@ -1011,6 +1168,17 @@
 
     // Slow case, using idiv instruction.
     __ bind(&slow);
+
+    // Check for (kMinInt % -1).
+    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+      Label left_not_min_int;
+      __ cmp(left_reg, kMinInt);
+      __ j(not_zero, &left_not_min_int, Label::kNear);
+      __ cmp(right_reg, -1);
+      DeoptimizeIf(zero, instr->environment());
+      __ bind(&left_not_min_int);
+    }
+
     // Sign extend to edx.
     __ cdq();
 
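The new guard above exists because x86 idiv raises #DE when the quotient of kMinInt / -1 overflows, so even the remainder path must deopt before executing cdq/idiv. A compilable sketch of the hazard and the guard (plain C++; the real code deopts where this sketch returns the mathematically correct 0):

    #include <climits>
    #include <cstdio>

    // INT_MIN % -1 is undefined behavior in C++ and faults with idiv on x86,
    // so check for it explicitly, as the emitted code above does.
    static bool SafeMod(int left, int right, int* result) {
      if (right == 0) return false;           // Division by zero: deopt.
      if (left == INT_MIN && right == -1) {   // idiv would raise #DE: deopt.
        *result = 0;                          // Mathematically kMinInt % -1.
        return true;
      }
      *result = left % right;
      return true;
    }

    int main() {
      int r;
      if (SafeMod(INT_MIN, -1, &r)) std::printf("remainder: %d\n", r);
      return 0;
    }
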
@@ -1459,7 +1627,8 @@
     int32_t lower = static_cast<int32_t>(int_val);
     int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
     if (CpuFeatures::IsSupported(SSE4_1)) {
-      CpuFeatures::Scope scope(SSE4_1);
+      CpuFeatures::Scope scope1(SSE2);
+      CpuFeatures::Scope scope2(SSE4_1);
       if (lower != 0) {
         __ Set(temp, Immediate(lower));
         __ movd(res, Operand(temp));
@@ -1471,6 +1640,7 @@
         __ pinsrd(res, Operand(temp), 1);
       }
     } else {
+      CpuFeatures::Scope scope(SSE2);
       __ Set(temp, Immediate(upper));
       __ movd(res, Operand(temp));
       __ psllq(res, 32);
@@ -1633,6 +1803,7 @@
 
 
 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+  CpuFeatures::Scope scope(SSE2);
   LOperand* left = instr->left();
   LOperand* right = instr->right();
   ASSERT(left->Equals(instr->result()));
@@ -1694,6 +1865,7 @@
 
 
 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  CpuFeatures::Scope scope(SSE2);
   XMMRegister left = ToDoubleRegister(instr->left());
   XMMRegister right = ToDoubleRegister(instr->right());
   XMMRegister result = ToDoubleRegister(instr->result());
@@ -1704,8 +1876,8 @@
       __ addsd(left, right);
       break;
     case Token::SUB:
-       __ subsd(left, right);
-       break;
+      __ subsd(left, right);
+      break;
     case Token::MUL:
       __ mulsd(left, right);
       break;
@@ -1778,6 +1950,7 @@
 void LCodeGen::DoBranch(LBranch* instr) {
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
+  CpuFeatures::Scope scope(SSE2);
 
   Representation r = instr->hydrogen()->value()->representation();
   if (r.IsInteger32()) {
@@ -1937,6 +2110,7 @@
   int false_block = chunk_->LookupDestination(instr->false_block_id());
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   Condition cc = TokenToCondition(instr->op(), instr->is_double());
+  CpuFeatures::Scope scope(SSE2);
 
   if (left->IsConstantOperand() && right->IsConstantOperand()) {
     // We can statically evaluate the comparison.
@@ -2446,7 +2620,7 @@
 
 
 void LCodeGen::DoReturn(LReturn* instr) {
-  if (FLAG_trace) {
+  if (FLAG_trace && info()->IsOptimizing()) {
     // Preserve the return value on the stack and rely on the runtime call
     // to return the value in the same register.  We're leaving the code
     // managed by the register allocator and tearing down the frame, it's
@@ -2460,8 +2634,10 @@
     __ mov(edx, Operand(ebp,
       JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
   }
-  __ mov(esp, ebp);
-  __ pop(ebp);
+  if (NeedsEagerFrame()) {
+    __ mov(esp, ebp);
+    __ pop(ebp);
+  }
   if (dynamic_frame_alignment_) {
     Label no_padding;
     __ cmp(edx, Immediate(kNoAlignmentPadding));
@@ -2474,7 +2650,12 @@
     __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
     __ bind(&no_padding);
   }
-  __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
+  if (info()->IsStub()) {
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ Ret();
+  } else {
+    __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
+  }
 }
 
 
@@ -2850,11 +3031,23 @@
       0,
       instr->additional_index()));
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-    XMMRegister result(ToDoubleRegister(instr->result()));
-    __ movss(result, operand);
-    __ cvtss2sd(result, result);
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope scope(SSE2);
+      XMMRegister result(ToDoubleRegister(instr->result()));
+      __ movss(result, operand);
+      __ cvtss2sd(result, result);
+    } else {
+      __ fld_s(operand);
+      HandleX87FPReturnValue(instr);
+    }
   } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-    __ movdbl(ToDoubleRegister(instr->result()), operand);
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope scope(SSE2);
+      __ movdbl(ToDoubleRegister(instr->result()), operand);
+    } else {
+      __ fld_d(operand);
+      HandleX87FPReturnValue(instr);
+    }
   } else {
     Register result(ToRegister(instr->result()));
     switch (elements_kind) {
@@ -2898,9 +3091,30 @@
 }
 
 
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
-  XMMRegister result = ToDoubleRegister(instr->result());
+void LCodeGen::HandleX87FPReturnValue(LInstruction* instr) {
+  if (IsX87TopOfStack(instr->result())) {
+    // The return value is already on the FP stack. If the value has no
+    // uses, pop it off. Otherwise, make sure there are enough copies of
+    // the value on the stack to feed all of its uses, e.g. when the
+    // following instruction takes the return value in multiple inputs.
+    int count = instr->hydrogen_value()->UseCount();
+    if (count == 0) {
+      __ fstp(0);
+    } else {
+      count--;
+      ASSERT(count <= 7);
+      while (count-- > 0) {
+        __ fld(0);
+      }
+    }
+  } else {
+    __ fstp_d(ToOperand(instr->result()));
+  }
+}
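
HandleX87FPReturnValue above balances the x87 register stack by use count: zero uses pops the value, N uses leaves N copies on the stack (fld st(0) duplicates the top). A compilable simulation of that bookkeeping:

    #include <cassert>
    #include <cstdio>
    #include <vector>

    // Model the x87 stack as a vector; back() is st(0).
    static void BalanceForUses(std::vector<double>* fp_stack, int use_count) {
      if (use_count == 0) {
        fp_stack->pop_back();                     // fstp(0): discard it.
      } else {
        for (int i = 0; i < use_count - 1; i++) {
          fp_stack->push_back(fp_stack->back());  // fld(0): duplicate st(0).
        }
      }
    }

    int main() {
      std::vector<double> fp_stack;
      fp_stack.push_back(3.14);      // The loaded value sits at st(0).
      BalanceForUses(&fp_stack, 3);  // Three uses -> three copies.
      assert(fp_stack.size() == 3);  // Must stay <= 8, the x87 stack depth.
      std::printf("copies on FP stack: %u\n",
                  static_cast<unsigned>(fp_stack.size()));
      return 0;
    }
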
 
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
   if (instr->hydrogen()->RequiresHoleCheck()) {
     int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
         sizeof(kHoleNanLower32);
@@ -2921,7 +3135,14 @@
       FAST_DOUBLE_ELEMENTS,
       FixedDoubleArray::kHeaderSize - kHeapObjectTag,
       instr->additional_index());
-  __ movdbl(result, double_load_operand);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope scope(SSE2);
+    XMMRegister result = ToDoubleRegister(instr->result());
+    __ movdbl(result, double_load_operand);
+  } else {
+    __ fld_d(double_load_operand);
+    HandleX87FPReturnValue(instr);
+  }
 }
 
 
@@ -3337,6 +3558,7 @@
   ASSERT(instr->value()->Equals(instr->result()));
   Representation r = instr->hydrogen()->value()->representation();
 
+  CpuFeatures::Scope scope(SSE2);
   if (r.IsDouble()) {
     XMMRegister  scratch = xmm0;
     XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3358,6 +3580,7 @@
 
 
 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+  CpuFeatures::Scope scope(SSE2);
   XMMRegister xmm_scratch = xmm0;
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3422,6 +3645,7 @@
 }
 
 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+  CpuFeatures::Scope scope(SSE2);
   XMMRegister xmm_scratch = xmm0;
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3467,6 +3691,7 @@
 
 
 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+  CpuFeatures::Scope scope(SSE2);
   XMMRegister input_reg = ToDoubleRegister(instr->value());
   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
   __ sqrtsd(input_reg, input_reg);
@@ -3474,6 +3699,7 @@
 
 
 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+  CpuFeatures::Scope scope(SSE2);
   XMMRegister xmm_scratch = xmm0;
   XMMRegister input_reg = ToDoubleRegister(instr->value());
   Register scratch = ToRegister(instr->temp());
@@ -3550,6 +3776,7 @@
 
   DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
 
+  CpuFeatures::Scope scope(SSE2);
   // Having marked this instruction as a call we can use any
   // registers.
   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
@@ -3617,6 +3844,7 @@
 
 
 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+  CpuFeatures::Scope scope(SSE2);
   ASSERT(instr->value()->Equals(instr->result()));
   XMMRegister input_reg = ToDoubleRegister(instr->value());
   Label positive, done, zero;
@@ -3648,6 +3876,7 @@
 
 
 void LCodeGen::DoMathExp(LMathExp* instr) {
+  CpuFeatures::Scope scope(SSE2);
   XMMRegister input = ToDoubleRegister(instr->value());
   XMMRegister result = ToDoubleRegister(instr->result());
   Register temp1 = ToRegister(instr->temp1());
@@ -3916,6 +4145,11 @@
     }
     DeoptimizeIf(below_equal, instr->environment());
   } else {
+    if (instr->hydrogen()->index()->representation().IsTagged() &&
+        !instr->hydrogen()->index()->type().IsSmi()) {
+      __ test(ToRegister(instr->index()), Immediate(kSmiTagMask));
+      DeoptimizeIf(not_zero, instr->environment());
+    }
     __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
     DeoptimizeIf(above_equal, instr->environment());
   }
@@ -3938,9 +4172,11 @@
       0,
       instr->additional_index()));
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+    CpuFeatures::Scope scope(SSE2);
     __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
     __ movss(operand, xmm0);
   } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    CpuFeatures::Scope scope(SSE2);
     __ movdbl(operand, ToDoubleRegister(instr->value()));
   } else {
     Register value = ToRegister(instr->value());
@@ -3976,6 +4212,7 @@
 
 
 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+  CpuFeatures::Scope scope(SSE2);
   XMMRegister value = ToDoubleRegister(instr->value());
 
   if (instr->NeedsCanonicalization()) {
@@ -4226,15 +4463,21 @@
 
 
 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
-  LOperand* input = instr->value();
-  ASSERT(input->IsRegister() || input->IsStackSlot());
-  LOperand* output = instr->result();
-  ASSERT(output->IsDoubleRegister());
-  __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope scope(SSE2);
+    LOperand* input = instr->value();
+    ASSERT(input->IsRegister() || input->IsStackSlot());
+    LOperand* output = instr->result();
+    ASSERT(output->IsDoubleRegister());
+    __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+  } else {
+    UNREACHABLE();
+  }
 }
 
 
 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+  CpuFeatures::Scope scope(SSE2);
   LOperand* input = instr->value();
   LOperand* output = instr->result();
   LOperand* temp = instr->temp();
@@ -4312,9 +4555,21 @@
     // the value in there. If that fails, call the runtime system.
     __ SmiUntag(reg);
     __ xor_(reg, 0x80000000);
-    __ cvtsi2sd(xmm0, Operand(reg));
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope feature_scope(SSE2);
+      __ cvtsi2sd(xmm0, Operand(reg));
+    } else {
+      __ push(reg);
+      __ fild_s(Operand(esp, 0));
+      __ pop(reg);
+    }
   } else {
-    __ LoadUint32(xmm0, reg, xmm1);
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope feature_scope(SSE2);
+      __ LoadUint32(xmm0, reg, xmm1);
+    } else {
+      UNREACHABLE();
+    }
   }
 
   if (FLAG_inline_new) {
@@ -4343,7 +4598,12 @@
   // Done. Put the value in xmm0 into the value of the allocated heap
   // number.
   __ bind(&done);
-  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope feature_scope(SSE2);
+    __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+  } else {
+    __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+  }
   __ StoreToSafepointRegisterSlot(reg, reg);
 }
 
@@ -4359,7 +4619,6 @@
     LNumberTagD* instr_;
   };
 
-  XMMRegister input_reg = ToDoubleRegister(instr->value());
   Register reg = ToRegister(instr->result());
   Register tmp = ToRegister(instr->temp());
 
@@ -4370,7 +4629,16 @@
     __ jmp(deferred->entry());
   }
   __ bind(deferred->exit());
-  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope scope(SSE2);
+    XMMRegister input_reg = ToDoubleRegister(instr->value());
+    __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+  } else {
+    if (!IsX87TopOfStack(instr->value())) {
+      __ fld_d(ToOperand(instr->value()));
+    }
+    __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+  }
 }
 
 
@@ -4514,6 +4782,7 @@
       __ mov(input_reg, Operand(esp, 0));  // Low word of answer is the result.
       __ add(Operand(esp), Immediate(kDoubleSize));
     } else {
+      CpuFeatures::Scope scope(SSE2);
       XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
       __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
       __ cvttsd2si(input_reg, Operand(xmm0));
@@ -4527,7 +4796,8 @@
       DeoptimizeIf(not_equal, instr->environment());
       DeoptimizeIf(parity_even, instr->environment());  // NaN.
     }
-  } else {
+  } else if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope scope(SSE2);
     // Deoptimize if we don't have a heap number.
     __ RecordComment("Deferred TaggedToI: not a heap number");
     DeoptimizeIf(not_equal, instr->environment());
@@ -4549,6 +4819,8 @@
       __ RecordComment("Deferred TaggedToI: minus zero");
       DeoptimizeIf(not_zero, instr->environment());
     }
+  } else {
+    UNREACHABLE();
   }
   __ bind(&done);
 }
@@ -4591,19 +4863,24 @@
   LOperand* result = instr->result();
   ASSERT(result->IsDoubleRegister());
 
-  Register input_reg = ToRegister(input);
-  XMMRegister result_reg = ToDoubleRegister(result);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope scope(SSE2);
+    Register input_reg = ToRegister(input);
+    XMMRegister result_reg = ToDoubleRegister(result);
 
-  bool deoptimize_on_minus_zero =
-      instr->hydrogen()->deoptimize_on_minus_zero();
-  Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
+    bool deoptimize_on_minus_zero =
+        instr->hydrogen()->deoptimize_on_minus_zero();
+    Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
 
-  EmitNumberUntagD(input_reg,
-                   temp_reg,
-                   result_reg,
-                   instr->hydrogen()->deoptimize_on_undefined(),
-                   deoptimize_on_minus_zero,
-                   instr->environment());
+    EmitNumberUntagD(input_reg,
+                     temp_reg,
+                     result_reg,
+                     instr->hydrogen()->deoptimize_on_undefined(),
+                     deoptimize_on_minus_zero,
+                     instr->environment());
+  } else {
+    UNIMPLEMENTED();
+  }
 }
 
 
@@ -4612,6 +4889,7 @@
   ASSERT(input->IsDoubleRegister());
   LOperand* result = instr->result();
   ASSERT(result->IsRegister());
+  CpuFeatures::Scope scope(SSE2);
 
   XMMRegister input_reg = ToDoubleRegister(input);
   Register result_reg = ToRegister(result);
@@ -4801,10 +5079,10 @@
 void LCodeGen::DoCheckMapCommon(Register reg,
                                 Handle<Map> map,
                                 CompareMapMode mode,
-                                LEnvironment* env) {
+                                LInstruction* instr) {
   Label success;
   __ CompareMap(reg, map, &success, mode);
-  DeoptimizeIf(not_equal, env);
+  DeoptimizeIf(not_equal, instr->environment());
   __ bind(&success);
 }
 
@@ -4822,12 +5100,13 @@
     __ j(equal, &success);
   }
   Handle<Map> map = map_set->last();
-  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
+  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
   __ bind(&success);
 }
 
 
 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  CpuFeatures::Scope scope(SSE2);
   XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
   Register result_reg = ToRegister(instr->result());
   __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
@@ -4842,6 +5121,8 @@
 
 
 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  CpuFeatures::Scope scope(SSE2);
+
   ASSERT(instr->unclamped()->Equals(instr->result()));
   Register input_reg = ToRegister(instr->unclamped());
   Label is_smi, done, heap_number;
@@ -4888,7 +5169,7 @@
   // Check prototype maps up to the holder.
   while (!current_prototype.is_identical_to(holder)) {
     DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
-                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+                     ALLOW_ELEMENT_TRANSITION_MAPS, instr);
 
     current_prototype =
         Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
@@ -4898,7 +5179,7 @@
 
   // Check the holder map.
   DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
-                   ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+                   ALLOW_ELEMENT_TRANSITION_MAPS, instr);
 }
 
 
@@ -5435,13 +5716,15 @@
 
 
 void LCodeGen::EnsureSpaceForLazyDeopt() {
-  // Ensure that we have enough space after the previous lazy-bailout
-  // instruction for patching the code here.
-  int current_pc = masm()->pc_offset();
-  int patch_size = Deoptimizer::patch_size();
-  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
-    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
-    __ Nop(padding_size);
+  if (!info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    int patch_size = Deoptimizer::patch_size();
+    if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+      int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+      __ Nop(padding_size);
+    }
   }
   last_lazy_deopt_pc_ = masm()->pc_offset();
 }
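
The hunks above repeat one pattern: probe with CpuFeatures::IsSupported(SSE2), open a CpuFeatures::Scope around the SSE2-only emission, and otherwise fall back to x87 instructions (fild_s, fld_d, fstp_d) or bail out with UNREACHABLE()/UNIMPLEMENTED() where no fallback exists yet. A minimal C++ sketch of that shape follows; the probe mask and EmitStoreDouble are illustrative stand-ins, not V8's real CpuFeatures implementation.

    #include <cassert>

    enum CpuFeature { SSE2 };

    class CpuFeatures {
     public:
      static bool IsSupported(CpuFeature f) { return probed_mask_ & (1u << f); }
      // RAII guard: asserts the feature was probed before feature-specific
      // instructions are emitted inside the scope.
      class Scope {
       public:
        explicit Scope(CpuFeature f) { assert(IsSupported(f)); }
      };
     private:
      static unsigned probed_mask_;  // filled by a CPUID probe at startup
    };
    unsigned CpuFeatures::probed_mask_ = 1u << SSE2;

    // Shape of the dispatch in DoDeferredNumberTagD above.
    void EmitStoreDouble() {
      if (CpuFeatures::IsSupported(SSE2)) {
        CpuFeatures::Scope scope(SSE2);
        // __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
      } else {
        // __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));  // pops x87 TOS
      }
    }
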
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 44ddaff..63d15f4 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -55,6 +55,7 @@
         current_instruction_(-1),
         instructions_(chunk->instructions()),
         deoptimizations_(4, info->zone()),
+        jump_table_(4, info->zone()),
         deoptimization_literals_(8, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
@@ -64,6 +65,7 @@
         dynamic_frame_alignment_(false),
         osr_pc_offset_(-1),
         last_lazy_deopt_pc_(0),
+        frame_is_built_(false),
         safepoints_(info->zone()),
         resolver_(this),
         expected_safepoint_kind_(Safepoint::kSimple) {
@@ -78,10 +80,20 @@
   Heap* heap() const { return isolate()->heap(); }
   Zone* zone() const { return zone_; }
 
+  bool NeedsEagerFrame() const {
+    return GetStackSlotCount() > 0 ||
+        info()->is_non_deferred_calling() ||
+        !info()->IsStub();
+  }
+  bool NeedsDeferredFrame() const {
+    return !NeedsEagerFrame() && info()->is_deferred_calling();
+  }
+
   // Support for converting LOperands to assembler types.
   Operand ToOperand(LOperand* op) const;
   Register ToRegister(LOperand* op) const;
   XMMRegister ToDoubleRegister(LOperand* op) const;
+  bool IsX87TopOfStack(LOperand* op) const;
 
   bool IsInteger32(LConstantOperand* op) const;
   Immediate ToInteger32Immediate(LOperand* op) const {
@@ -90,6 +102,9 @@
 
   Handle<Object> ToHandle(LConstantOperand* op) const;
 
+  // A utility for instructions that return floating point values on X87.
+  void HandleX87FPReturnValue(LInstruction* instr);
+
   // The operand denoting the second word (the one with a higher address) of
   // a double stack slot.
   Operand HighOperand(LOperand* op);
@@ -122,7 +137,7 @@
                                        Label* map_check);
 
   void DoCheckMapCommon(Register reg, Handle<Map> map,
-                        CompareMapMode mode, LEnvironment* env);
+                        CompareMapMode mode, LInstruction* instr);
 
   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
@@ -172,7 +187,7 @@
                        Register temporary2);
 
   int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
-  int GetParameterCount() const { return scope()->num_parameters(); }
+  int GetParameterCount() const { return info()->num_parameters(); }
 
   void Abort(const char* reason);
   void Comment(const char* format, ...);
@@ -184,9 +199,7 @@
   bool GeneratePrologue();
   bool GenerateBody();
   bool GenerateDeferredCode();
-  // Pad the reloc info to ensure that we have enough space to patch during
-  // deoptimization.
-  bool GenerateRelocPadding();
+  bool GenerateJumpTable();
   bool GenerateSafepointTable();
 
   enum SafepointMode {
@@ -356,10 +369,23 @@
   MacroAssembler* const masm_;
   CompilationInfo* const info_;
 
+  struct JumpTableEntry {
+    inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
+        : label(),
+          address(entry),
+          needs_frame(frame),
+          is_lazy_deopt(is_lazy) { }
+    Label label;
+    Address address;
+    bool needs_frame;
+    bool is_lazy_deopt;
+  };
+
   int current_block_;
   int current_instruction_;
   const ZoneList<LInstruction*>* instructions_;
   ZoneList<LEnvironment*> deoptimizations_;
+  ZoneList<JumpTableEntry> jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
   int inlined_function_count_;
   Scope* const scope_;
@@ -369,6 +395,7 @@
   bool dynamic_frame_alignment_;
   int osr_pc_offset_;
   int last_lazy_deopt_pc_;
+  bool frame_is_built_;
 
   // Builder that keeps track of safepoints in the code. The table
   // itself is emitted at the end of the generated code.
@@ -386,6 +413,7 @@
       ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
       codegen_->masm_->PushSafepointRegisters();
       codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+      ASSERT(codegen_->info()->is_calling());
     }
 
     ~PushSafepointRegistersScope() {
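
The two frame predicates added above carry the stub-compilation policy: a stub only builds a stack frame eagerly if it spills or calls on its fast path, and otherwise defers frame construction to deferred code. Restated as a sketch with a hypothetical pared-down CompilationInfo (the real class carries far more state):

    struct CompilationInfo {
      bool is_stub;                  // compiling a code stub, not a JS function
      bool is_non_deferred_calling;  // calls out on the fast path
      bool is_deferred_calling;      // calls out only from deferred code
    };

    bool NeedsEagerFrame(const CompilationInfo& info, int stack_slots) {
      // Optimized JS functions always get a frame; stubs only when they
      // spill or call on the main path.
      return stack_slots > 0 || info.is_non_deferred_calling || !info.is_stub;
    }

    bool NeedsDeferredFrame(const CompilationInfo& info, int stack_slots) {
      // Built lazily, only when a deferred path that calls out is entered.
      return !NeedsEagerFrame(info, stack_slots) && info.is_deferred_calling;
    }

This is also why MarkAsCall now calls MarkAsNonDeferredCalling in the chunk builder below: any fast-path call forces the eager frame.
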
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
index 6428916..7cb15e6 100644
--- a/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/ia32/lithium-gap-resolver-ia32.cc
@@ -191,7 +191,7 @@
 
 Register LGapResolver::GetFreeRegisterNot(Register reg) {
   int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
-  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
     if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
       return Register::FromAllocationIndex(i);
     }
@@ -204,7 +204,7 @@
   if (!moves_.is_empty()) return false;
   if (spilled_register_ >= 0) return false;
 
-  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
     if (source_uses_[i] != 0) return false;
     if (destination_uses_[i] != 0) return false;
   }
@@ -256,7 +256,7 @@
 
   // 3. Prefer to spill a register that is not used in any remaining move
   // because it will not need to be restored until the end.
-  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
     if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
       Register scratch = Register::FromAllocationIndex(i);
       __ push(scratch);
@@ -324,29 +324,38 @@
     }
 
   } else if (source->IsDoubleRegister()) {
-    XMMRegister src = cgen_->ToDoubleRegister(source);
-    if (destination->IsDoubleRegister()) {
-      XMMRegister dst = cgen_->ToDoubleRegister(destination);
-      __ movaps(dst, src);
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope scope(SSE2);
+      XMMRegister src = cgen_->ToDoubleRegister(source);
+      if (destination->IsDoubleRegister()) {
+        XMMRegister dst = cgen_->ToDoubleRegister(destination);
+        __ movaps(dst, src);
+      } else {
+        ASSERT(destination->IsDoubleStackSlot());
+        Operand dst = cgen_->ToOperand(destination);
+        __ movdbl(dst, src);
+      }
     } else {
-      ASSERT(destination->IsDoubleStackSlot());
-      Operand dst = cgen_->ToOperand(destination);
-      __ movdbl(dst, src);
+      UNREACHABLE();
     }
   } else if (source->IsDoubleStackSlot()) {
-    ASSERT(destination->IsDoubleRegister() ||
-           destination->IsDoubleStackSlot());
-    Operand src = cgen_->ToOperand(source);
-    if (destination->IsDoubleRegister()) {
-      XMMRegister dst = cgen_->ToDoubleRegister(destination);
-      __ movdbl(dst, src);
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope scope(SSE2);
+      ASSERT(destination->IsDoubleRegister() ||
+             destination->IsDoubleStackSlot());
+      Operand src = cgen_->ToOperand(source);
+      if (destination->IsDoubleRegister()) {
+        XMMRegister dst = cgen_->ToDoubleRegister(destination);
+        __ movdbl(dst, src);
+      } else {
+        // We rely on having xmm0 available as a fixed scratch register.
+        Operand dst = cgen_->ToOperand(destination);
+        __ movdbl(xmm0, src);
+        __ movdbl(dst, xmm0);
+      }
     } else {
-      // We rely on having xmm0 available as a fixed scratch register.
-      Operand dst = cgen_->ToOperand(destination);
-      __ movdbl(xmm0, src);
-      __ movdbl(dst, xmm0);
+      UNREACHABLE();
     }
-
   } else {
     UNREACHABLE();
   }
@@ -410,6 +419,7 @@
       __ mov(src, tmp0);
     }
   } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+    CpuFeatures::Scope scope(SSE2);
     // XMM register-register swap. We rely on having xmm0
     // available as a fixed scratch register.
     XMMRegister src = cgen_->ToDoubleRegister(source);
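
ia32 has no memory-to-memory move, so a double-slot-to-double-slot move must bounce through a register; the resolver relies on xmm0 being kept free as a fixed scratch, and without SSE2 such moves simply cannot occur (hence the UNREACHABLE branches). A sketch of the shape, with plain memcpy standing in for movdbl:

    #include <cstring>

    // Stack-slot-to-stack-slot double move, as in EmitMove above.
    void MoveDoubleSlot(void* dst, const void* src) {
      double scratch;                               // stands in for xmm0
      std::memcpy(&scratch, src, sizeof(scratch));  // __ movdbl(xmm0, src)
      std::memcpy(dst, &scratch, sizeof(scratch));  // __ movdbl(dst, xmm0)
    }
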
diff --git a/src/ia32/lithium-gap-resolver-ia32.h b/src/ia32/lithium-gap-resolver-ia32.h
index 0c81d72..3a58f58 100644
--- a/src/ia32/lithium-gap-resolver-ia32.h
+++ b/src/ia32/lithium-gap-resolver-ia32.h
@@ -97,8 +97,8 @@
   ZoneList<LMoveOperands> moves_;
 
   // Source and destination use counts for the general purpose registers.
-  int source_uses_[Register::kNumAllocatableRegisters];
-  int destination_uses_[Register::kNumAllocatableRegisters];
+  int source_uses_[Register::kMaxNumAllocatableRegisters];
+  int destination_uses_[Register::kMaxNumAllocatableRegisters];
 
   // If we had to spill on demand, the currently spilled register's
   // allocation index.
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index d7ac7a8..6e07593 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -44,10 +44,10 @@
 #undef DEFINE_COMPILE
 
 LOsrEntry::LOsrEntry() {
-  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
     register_spills_[i] = NULL;
   }
-  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
     double_register_spills_[i] = NULL;
   }
 }
@@ -460,9 +460,11 @@
   status_ = BUILDING;
 
   // Reserve the first spill slot for the state of dynamic alignment.
-  int alignment_state_index = chunk_->GetNextSpillIndex(false);
-  ASSERT_EQ(alignment_state_index, 0);
-  USE(alignment_state_index);
+  if (info()->IsOptimizing()) {
+    int alignment_state_index = chunk_->GetNextSpillIndex(false);
+    ASSERT_EQ(alignment_state_index, 0);
+    USE(alignment_state_index);
+  }
 
   const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
   for (int i = 0; i < blocks->length(); i++) {
@@ -494,6 +496,12 @@
 }
 
 
+LUnallocated* LChunkBuilder::ToUnallocated(X87TopOfStackRegister reg) {
+  return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+      X87TopOfStackRegister::ToAllocationIndex(reg));
+}
+
+
 LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
   return Use(value, ToUnallocated(fixed_register));
 }
@@ -626,6 +634,13 @@
 }
 
 
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineX87TOS(
+    LTemplateInstruction<1, I, T>* instr) {
+  return Define(instr, ToUnallocated(x87tos));
+}
+
+
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
   int argument_index_accumulator = 0;
@@ -638,6 +653,8 @@
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
+  info()->MarkAsNonDeferredCalling();
+
 #ifdef DEBUG
   instr->VerifyCall();
 #endif
@@ -1698,8 +1715,12 @@
 LInstruction* LChunkBuilder::DoChange(HChange* instr) {
   Representation from = instr->from();
   Representation to = instr->to();
+  // Only mark conversions that might need to allocate as calling, rather
+  // than all changes, so that simple, non-allocating conversions do not
+  // force a stack frame to be built.
   if (from.IsTagged()) {
     if (to.IsDouble()) {
+      info()->MarkAsDeferredCalling();
       LOperand* value = UseRegister(instr->value());
       // Temp register only necessary for minus zero check.
       LOperand* temp = instr->deoptimize_on_minus_zero()
@@ -1724,7 +1745,10 @@
     }
   } else if (from.IsDouble()) {
     if (to.IsTagged()) {
-      LOperand* value = UseRegister(instr->value());
+      info()->MarkAsDeferredCalling();
+      LOperand* value = CpuFeatures::IsSupported(SSE2)
+          ? UseRegisterAtStart(instr->value())
+          : UseAtStart(instr->value());
       LOperand* temp = TempRegister();
 
       // Make sure that temp and result_temp are different registers.
@@ -1742,6 +1766,7 @@
           DefineAsRegister(new(zone()) LDoubleToI(value, temp)));
     }
   } else if (from.IsInteger32()) {
+    info()->MarkAsDeferredCalling();
     if (to.IsTagged()) {
       HValue* val = instr->value();
       LOperand* value = UseRegister(val);
@@ -2258,8 +2283,17 @@
 
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
-  int spill_index = chunk()->GetParameterStackSlot(instr->index());
-  return DefineAsSpilled(new(zone()) LParameter, spill_index);
+  LParameter* result = new(zone()) LParameter;
+  if (info()->IsOptimizing()) {
+    int spill_index = chunk()->GetParameterStackSlot(instr->index());
+    return DefineAsSpilled(result, spill_index);
+  } else {
+    ASSERT(info()->IsStub());
+    CodeStubInterfaceDescriptor* descriptor =
+        info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+    Register reg = descriptor->register_params_[instr->index()];
+    return DefineFixed(result, reg);
+  }
 }
 
 
@@ -2360,6 +2394,7 @@
 
 
 LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  info()->MarkAsDeferredCalling();
   if (instr->is_function_entry()) {
     LOperand* context = UseFixed(instr->context(), esi);
     return MarkAsCall(new(zone()) LStackCheck(context), instr);
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index cf85374..f4056c1 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -250,7 +250,11 @@
   void MarkAsCall() { is_call_ = true; }
 
   // Interface to the register allocator and iterators.
-  bool IsMarkedAsCall() const { return is_call_; }
+  bool ClobbersTemps() const { return is_call_; }
+  bool ClobbersRegisters() const { return is_call_; }
+  virtual bool ClobbersDoubleRegisters() const {
+    return is_call_ || !CpuFeatures::IsSupported(SSE2);
+  }
 
   virtual bool HasResult() const = 0;
   virtual LOperand* result() = 0;
@@ -356,6 +360,7 @@
 class LInstructionGap: public LGap {
  public:
   explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+  virtual bool ClobbersDoubleRegisters() const { return false; }
 
   DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
 };
@@ -1438,7 +1443,6 @@
     inputs_[0] = elements;
     inputs_[1] = key;
   }
-
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   ElementsKind elements_kind() const {
@@ -1448,11 +1452,18 @@
     return hydrogen()->is_external();
   }
 
+  virtual bool ClobbersDoubleRegisters() const {
+    return !IsDoubleOrFloatElementsKind(hydrogen()->elements_kind());
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
   DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
 
   virtual void PrintDataTo(StringStream* stream);
   uint32_t additional_index() const { return hydrogen()->index_offset(); }
+  bool key_is_smi() {
+    return hydrogen()->key()->representation().IsTagged();
+  }
 };
 
 
@@ -2433,8 +2444,9 @@
   // slot, i.e., that must also be restored to the spill slot on OSR entry.
   // NULL if the register has no assigned spill slot.  Indexed by allocation
   // index.
-  LOperand* register_spills_[Register::kNumAllocatableRegisters];
-  LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+  LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
+  LOperand* double_register_spills_[
+      DoubleRegister::kMaxNumAllocatableRegisters];
 };
 
 
@@ -2598,6 +2610,7 @@
   // Methods for getting operands for Use / Define / Temp.
   LUnallocated* ToUnallocated(Register reg);
   LUnallocated* ToUnallocated(XMMRegister reg);
+  LUnallocated* ToUnallocated(X87TopOfStackRegister reg);
 
   // Methods for setting up define-use relationships.
   MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
@@ -2658,6 +2671,8 @@
   template<int I, int T>
       LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
                                       XMMRegister reg);
+  template<int I, int T>
+      LInstruction* DefineX87TOS(LTemplateInstruction<1, I, T>* instr);
   // Assigns an environment to an instruction.  An instruction which can
   // deoptimize must have an environment.
   LInstruction* AssignEnvironment(LInstruction* instr);
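
ClobbersDoubleRegisters is how the x87 constraint reaches the register allocator: without SSE2 the x87 register stack cannot be kept live across arbitrary instructions, so everything except a gap (whose moves the allocator resolves itself) is treated as clobbering all double registers. A sketch of the policy with assumed minimal shapes:

    struct LInstruction {
      bool is_call = false;
      virtual ~LInstruction() {}
      virtual bool ClobbersDoubleRegisters() const {
        return is_call || !cpu_has_sse2;  // x87 mode: assume clobbered
      }
      static bool cpu_has_sse2;  // stand-in for CpuFeatures::IsSupported(SSE2)
    };
    bool LInstruction::cpu_has_sse2 = true;

    struct LInstructionGap : LInstruction {
      // Gap moves never touch the x87 stack, so they are exempt.
      virtual bool ClobbersDoubleRegisters() const { return false; }
    };

LLoadKeyed refines this: a load from a double/float-element array must not be treated as clobbering, since its double result has to survive, while other keyed loads keep the clobbering default.
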
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 14fb8ca..e9ce797 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1801,7 +1801,8 @@
   const Runtime::Function* function = Runtime::FunctionForId(id);
   Set(eax, Immediate(function->nargs));
   mov(ebx, Immediate(ExternalReference(function, isolate())));
-  CEntryStub ces(1, kSaveFPRegs);
+  CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? kSaveFPRegs
+                                                   : kDontSaveFPRegs);
   CallStub(&ces);
 }
 
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 7abb29b..e7a5d28 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -924,9 +924,9 @@
   Operand SafepointRegisterSlot(Register reg);
   static int SafepointRegisterStackIndex(int reg_code);
 
-  // Needs access to SafepointRegisterStackIndex for optimized frame
+  // Needs access to SafepointRegisterStackIndex for compiled frame
   // traversal.
-  friend class OptimizedFrame;
+  friend class StandardFrame;
 };
 
 
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index c8695c5..7834627 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -3398,9 +3398,17 @@
   // -----------------------------------
 
   ElementsKind elements_kind = receiver_map->elements_kind();
-  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
-  __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
+  if (receiver_map->has_fast_elements() ||
+      receiver_map->has_external_array_elements()) {
+    Handle<Code> stub = KeyedLoadFastElementStub(
+        receiver_map->instance_type() == JS_ARRAY_TYPE,
+        elements_kind).GetCode();
+    __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
+  } else {
+    Handle<Code> stub =
+        KeyedLoadDictionaryElementStub().GetCode();
+    __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
+  }
 
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -3661,157 +3669,6 @@
 }
 
 
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
-    MacroAssembler* masm,
-    ElementsKind elements_kind) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label miss_force_generic, failed_allocation, slow;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
-
-  // Check that the index is in range.
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-  __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
-  // Unsigned comparison catches both negative and too-large values.
-  __ j(above_equal, &miss_force_generic);
-  __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
-  // ebx: base pointer of external storage
-  switch (elements_kind) {
-    case EXTERNAL_BYTE_ELEMENTS:
-      __ SmiUntag(ecx);  // Untag the index.
-      __ movsx_b(eax, Operand(ebx, ecx, times_1, 0));
-      break;
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-    case EXTERNAL_PIXEL_ELEMENTS:
-      __ SmiUntag(ecx);  // Untag the index.
-      __ movzx_b(eax, Operand(ebx, ecx, times_1, 0));
-      break;
-    case EXTERNAL_SHORT_ELEMENTS:
-      __ movsx_w(eax, Operand(ebx, ecx, times_1, 0));
-      break;
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      __ movzx_w(eax, Operand(ebx, ecx, times_1, 0));
-      break;
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_INT_ELEMENTS:
-      __ mov(eax, Operand(ebx, ecx, times_2, 0));
-      break;
-    case EXTERNAL_FLOAT_ELEMENTS:
-      __ fld_s(Operand(ebx, ecx, times_2, 0));
-      break;
-    case EXTERNAL_DOUBLE_ELEMENTS:
-      __ fld_d(Operand(ebx, ecx, times_4, 0));
-      break;
-    default:
-      UNREACHABLE();
-      break;
-  }
-
-  // For integer array types:
-  // eax: value
-  // For floating-point array type:
-  // FP(0): value
-
-  if (elements_kind == EXTERNAL_INT_ELEMENTS ||
-      elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
-    // For the Int and UnsignedInt array types, we need to see whether
-    // the value can be represented in a Smi. If not, we need to convert
-    // it to a HeapNumber.
-    Label box_int;
-    if (elements_kind == EXTERNAL_INT_ELEMENTS) {
-      __ cmp(eax, 0xc0000000);
-      __ j(sign, &box_int);
-    } else {
-      ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
-      // The test is different for unsigned int values. Since we need
-      // the value to be in the range of a positive smi, we can't
-      // handle either of the top two bits being set in the value.
-      __ test(eax, Immediate(0xc0000000));
-      __ j(not_zero, &box_int);
-    }
-
-    __ SmiTag(eax);
-    __ ret(0);
-
-    __ bind(&box_int);
-
-    // Allocate a HeapNumber for the int and perform int-to-double
-    // conversion.
-    if (elements_kind == EXTERNAL_INT_ELEMENTS) {
-      __ push(eax);
-      __ fild_s(Operand(esp, 0));
-      __ pop(eax);
-    } else {
-      ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
-      // Need to zero-extend the value.
-      // There's no fild variant for unsigned values, so zero-extend
-      // to a 64-bit int manually.
-      __ push(Immediate(0));
-      __ push(eax);
-      __ fild_d(Operand(esp, 0));
-      __ pop(eax);
-      __ pop(eax);
-    }
-    // FP(0): value
-    __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
-    // Set the value.
-    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-    __ ret(0);
-  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
-             elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-    // For the floating-point array type, we need to always allocate a
-    // HeapNumber.
-    __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
-    // Set the value.
-    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-    __ ret(0);
-  } else {
-    __ SmiTag(eax);
-    __ ret(0);
-  }
-
-  // If we fail allocation of the HeapNumber, we still have a value on
-  // top of the FPU stack. Remove it.
-  __ bind(&failed_allocation);
-  __ fstp(0);
-  // Fall through to slow case.
-
-  // Slow case: Jump to runtime.
-  __ bind(&slow);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
-
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
-  __ jmp(ic, RelocInfo::CODE_TARGET);
-
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  // Miss case: Jump to runtime.
-  __ bind(&miss_force_generic);
-  Handle<Code> miss_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
 void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     MacroAssembler* masm,
     ElementsKind elements_kind) {
@@ -4011,106 +3868,6 @@
 }
 
 
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label miss_force_generic;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
-
-  // Get the elements array.
-  __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
-  __ AssertFastElements(eax);
-
-  // Check that the key is within bounds.
-  __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
-  __ j(above_equal, &miss_force_generic);
-
-  // Load the result and make sure it's not the hole.
-  __ mov(ebx, Operand(eax, ecx, times_2,
-                      FixedArray::kHeaderSize - kHeapObjectTag));
-  __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
-  __ j(equal, &miss_force_generic);
-  __ mov(eax, ebx);
-  __ ret(0);
-
-  __ bind(&miss_force_generic);
-  Handle<Code> miss_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
-    MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label miss_force_generic, slow_allocate_heapnumber;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
-
-  // Get the elements array.
-  __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
-  __ AssertFastElements(eax);
-
-  // Check that the key is within bounds.
-  __ cmp(ecx, FieldOperand(eax, FixedDoubleArray::kLengthOffset));
-  __ j(above_equal, &miss_force_generic);
-
-  // Check for the hole
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ cmp(FieldOperand(eax, ecx, times_4, offset), Immediate(kHoleNanUpper32));
-  __ j(equal, &miss_force_generic);
-
-  // Always allocate a heap number for the result.
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope use_sse2(SSE2);
-    __ movdbl(xmm0, FieldOperand(eax, ecx, times_4,
-                                 FixedDoubleArray::kHeaderSize));
-  } else {
-    __ fld_d(FieldOperand(eax, ecx, times_4, FixedDoubleArray::kHeaderSize));
-  }
-  __ AllocateHeapNumber(eax, ebx, edi, &slow_allocate_heapnumber);
-  // Set the value.
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope use_sse2(SSE2);
-    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-  } else {
-    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-  }
-  __ ret(0);
-
-  __ bind(&slow_allocate_heapnumber);
-  // A value was pushed on the floating point stack before the allocation, if
-  // the allocation fails it needs to be removed.
-  if (!CpuFeatures::IsSupported(SSE2)) {
-    __ fstp(0);
-  }
-  Handle<Code> slow_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_Slow();
-  __ jmp(slow_ic, RelocInfo::CODE_TARGET);
-
-  __ bind(&miss_force_generic);
-  Handle<Code> miss_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
 void KeyedStoreStubCompiler::GenerateStoreFastElement(
     MacroAssembler* masm,
     bool is_js_array,
diff --git a/src/ic.cc b/src/ic.cc
index bf2a649..3633036 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1054,7 +1054,13 @@
     ElementsKind elements_kind,
     KeyedAccessGrowMode grow_mode) {
   ASSERT(grow_mode == DO_NOT_ALLOW_JSARRAY_GROWTH);
-  return KeyedLoadElementStub(elements_kind).GetCode();
+  if (IsFastElementsKind(elements_kind) ||
+      IsExternalArrayElementsKind(elements_kind)) {
+    return KeyedLoadFastElementStub(is_js_array, elements_kind).GetCode();
+  } else {
+    ASSERT(elements_kind == DICTIONARY_ELEMENTS);
+    return KeyedLoadDictionaryElementStub().GetCode();
+  }
 }
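
The monolithic KeyedLoadElementStub is thus split by elements kind: fast and external-array kinds go to KeyedLoadFastElementStub (replacing the hand-written assembly stubs deleted above), additionally parameterized on whether the receiver is a JS array, while dictionary elements get a dedicated KeyedLoadDictionaryElementStub. The selection, restated as a sketch with simplified kinds:

    enum class ElementsKind { kFast, kExternal, kDictionary };  // simplified

    const char* SelectKeyedLoadStub(ElementsKind kind) {
      if (kind == ElementsKind::kFast || kind == ElementsKind::kExternal) {
        // Shared stub; the is_js_array flag (elided here) adds the extra
        // length handling a JSArray receiver needs.
        return "KeyedLoadFastElementStub";
      }
      return "KeyedLoadDictionaryElementStub";  // slow-mode elements
    }
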
 
 
diff --git a/src/isolate.cc b/src/isolate.cc
index 15d0bdd..a64e01d 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -634,6 +634,7 @@
   }
   Handle<JSArray> result = factory()->NewJSArrayWithElements(elements);
   result->set_length(Smi::FromInt(cursor));
+  heap()->error_object_list()->Add(*error_object);
   return result;
 }
 
@@ -656,15 +657,21 @@
   int limit = Max(frame_limit, 0);
   Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
 
-  Handle<String> column_key = factory()->LookupAsciiSymbol("column");
-  Handle<String> line_key = factory()->LookupAsciiSymbol("lineNumber");
-  Handle<String> script_key = factory()->LookupAsciiSymbol("scriptName");
+  Handle<String> column_key =
+      factory()->LookupOneByteSymbol(STATIC_ASCII_VECTOR("column"));
+  Handle<String> line_key =
+      factory()->LookupOneByteSymbol(STATIC_ASCII_VECTOR("lineNumber"));
+  Handle<String> script_key =
+      factory()->LookupOneByteSymbol(STATIC_ASCII_VECTOR("scriptName"));
   Handle<String> script_name_or_source_url_key =
-      factory()->LookupAsciiSymbol("scriptNameOrSourceURL");
-  Handle<String> function_key = factory()->LookupAsciiSymbol("functionName");
-  Handle<String> eval_key = factory()->LookupAsciiSymbol("isEval");
+      factory()->LookupOneByteSymbol(
+          STATIC_ASCII_VECTOR("scriptNameOrSourceURL"));
+  Handle<String> function_key =
+      factory()->LookupOneByteSymbol(STATIC_ASCII_VECTOR("functionName"));
+  Handle<String> eval_key =
+      factory()->LookupOneByteSymbol(STATIC_ASCII_VECTOR("isEval"));
   Handle<String> constructor_key =
-      factory()->LookupAsciiSymbol("isConstructor");
+      factory()->LookupOneByteSymbol(STATIC_ASCII_VECTOR("isConstructor"));
 
   StackTraceFrameIterator it(this);
   int frames_seen = 0;
@@ -811,7 +818,7 @@
 void Isolate::PrintStack(StringStream* accumulator) {
   if (!IsInitialized()) {
     accumulator->Add(
-        "\n==== Stack trace is not available ==========================\n\n");
+        "\n==== JS stack trace is not available =======================\n\n");
     accumulator->Add(
         "\n==== Isolate for the thread is not initialized =============\n\n");
     return;
@@ -824,7 +831,7 @@
   if (c_entry_fp(thread_local_top()) == 0) return;
 
   accumulator->Add(
-      "\n==== Stack trace ============================================\n\n");
+      "\n==== JS stack trace =========================================\n\n");
   PrintFrames(accumulator, StackFrame::OVERVIEW);
 
   accumulator->Add(
@@ -1154,7 +1161,8 @@
 bool Isolate::IsErrorObject(Handle<Object> obj) {
   if (!obj->IsJSObject()) return false;
 
-  String* error_key = *(factory()->LookupAsciiSymbol("$Error"));
+  String* error_key =
+      *(factory()->LookupOneByteSymbol(STATIC_ASCII_VECTOR("$Error")));
   Object* error_constructor =
       js_builtins_object()->GetPropertyNoExceptionThrown(error_key);
 
@@ -1235,7 +1243,8 @@
         bool failed = false;
         exception_arg = Execution::ToDetailString(exception_arg, &failed);
         if (failed) {
-          exception_arg = factory()->LookupAsciiSymbol("exception");
+          exception_arg =
+              factory()->LookupOneByteSymbol(STATIC_ASCII_VECTOR("exception"));
         }
       }
       Handle<Object> message_obj = MessageHandler::MakeMessageObject(
@@ -1635,6 +1644,7 @@
       string_tracker_(NULL),
       regexp_stack_(NULL),
       date_cache_(NULL),
+      code_stub_interface_descriptors_(NULL),
       context_exit_happened_(false),
       deferred_handles_head_(NULL),
       optimizing_compiler_thread_(this) {
@@ -1797,6 +1807,9 @@
   delete date_cache_;
   date_cache_ = NULL;
 
+  delete[] code_stub_interface_descriptors_;
+  code_stub_interface_descriptors_ = NULL;
+
   delete regexp_stack_;
   regexp_stack_ = NULL;
 
@@ -1960,6 +1973,10 @@
   regexp_stack_ = new RegExpStack();
   regexp_stack_->isolate_ = this;
   date_cache_ = new DateCache();
+  code_stub_interface_descriptors_ =
+      new CodeStubInterfaceDescriptor[CodeStub::NUMBER_OF_IDS];
+  memset(code_stub_interface_descriptors_, 0,
+         kPointerSize * CodeStub::NUMBER_OF_IDS);
 
   // Enable logging before setting up the heap
   logger_->SetUp();
@@ -2020,6 +2037,8 @@
   debug_->SetUp(create_heap_objects);
 #endif
 
+  deoptimizer_data_ = new DeoptimizerData;
+
   // If we are deserializing, read the state into the now-empty heap.
   if (!create_heap_objects) {
     des->Deserialize();
@@ -2038,7 +2057,6 @@
   // Quiet the heap NaN if needed on target platform.
   if (!create_heap_objects) Assembler::QuietNaN(heap_.nan_value());
 
-  deoptimizer_data_ = new DeoptimizerData;
   runtime_profiler_ = new RuntimeProfiler(this);
   runtime_profiler_->SetUp();
 
@@ -2060,6 +2078,17 @@
 
   state_ = INITIALIZED;
   time_millis_at_init_ = OS::TimeCurrentMillis();
+
+  if (!create_heap_objects) {
+    // Now that the heap is consistent, it's OK to generate the code for the
+    // deopt entry table that might have been referred to by optimized code in
+    // the snapshot.
+    HandleScope scope(this);
+    Deoptimizer::EnsureCodeForDeoptimizationEntry(
+        Deoptimizer::LAZY,
+        kDeoptTableSerializeEntryCount - 1);
+  }
+
   if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
   return true;
 }
@@ -2174,6 +2203,12 @@
 }
 
 
+CodeStubInterfaceDescriptor*
+    Isolate::code_stub_interface_descriptor(int index) {
+  return code_stub_interface_descriptors_ + index;
+}
+
+
 #ifdef DEBUG
 #define ISOLATE_FIELD_OFFSET(type, name, ignored)                       \
 const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
diff --git a/src/isolate.h b/src/isolate.h
index ac2e554..9f086b2 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -53,6 +53,7 @@
 class Bootstrapper;
 class CodeGenerator;
 class CodeRange;
+struct CodeStubInterfaceDescriptor;
 class CompilationCache;
 class ContextSlotCache;
 class ContextSwitcher;
@@ -1015,7 +1016,6 @@
         RuntimeProfiler::IsolateEnteredJS(this);
       } else if (current_state == JS && state != JS) {
         // JS -> non-JS transition.
-        ASSERT(RuntimeProfiler::IsSomeIsolateInJS());
         RuntimeProfiler::IsolateExitedJS(this);
       } else {
         // Other types of state transitions are not interesting to the
@@ -1059,6 +1059,9 @@
     date_cache_ = date_cache;
   }
 
+  CodeStubInterfaceDescriptor*
+      code_stub_interface_descriptor(int index);
+
   void IterateDeferredHandles(ObjectVisitor* visitor);
   void LinkDeferredHandles(DeferredHandles* deferred_handles);
   void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
@@ -1241,6 +1244,7 @@
   RegExpStack* regexp_stack_;
   DateCache* date_cache_;
   unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
+  CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;
 
   // The garbage collector should be a little more aggressive when it knows
   // that a context was recently exited.
diff --git a/src/json-parser.h b/src/json-parser.h
index 2f980cc..f6140f1 100644
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -631,7 +631,17 @@
                                                         position_);
       }
       if (c0 < 0x20) return Handle<String>::null();
-      running_hash = StringHasher::AddCharacterCore(running_hash, c0);
+      if (static_cast<uint32_t>(c0) >
+          unibrow::Utf16::kMaxNonSurrogateCharCode) {
+        running_hash =
+            StringHasher::AddCharacterCore(running_hash,
+                                           unibrow::Utf16::LeadSurrogate(c0));
+        running_hash =
+            StringHasher::AddCharacterCore(running_hash,
+                                           unibrow::Utf16::TrailSurrogate(c0));
+      } else {
+        running_hash = StringHasher::AddCharacterCore(running_hash, c0);
+      }
       position++;
       if (position >= source_length_) return Handle<String>::null();
       c0 = seq_source_->SeqOneByteStringGet(position);
@@ -685,9 +695,7 @@
   int length = position_ - beg_pos;
   Handle<String> result;
   if (seq_ascii && is_symbol) {
-    result = factory()->LookupAsciiSymbol(seq_source_,
-                                          beg_pos,
-                                          length);
+    result = factory()->LookupOneByteSymbol(seq_source_, beg_pos, length);
   } else {
     result = factory()->NewRawOneByteString(length, pretenure_);
     char* dest = SeqOneByteString::cast(*result)->GetChars();
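
The hashing change above keeps the incremental hash consistent with how the string would be stored: V8 string hashes are computed over UTF-16 code units, so a supplementary code point must feed the hasher its lead and trail surrogates rather than the raw code point. A sketch of the split, with a Jenkins-style one-at-a-time step standing in for StringHasher::AddCharacterCore (assumed here for illustration):

    #include <cstdint>

    const uint32_t kMaxNonSurrogateCharCode = 0xFFFF;

    uint16_t LeadSurrogate(uint32_t cp) {
      return static_cast<uint16_t>(0xD800 + ((cp - 0x10000) >> 10));
    }
    uint16_t TrailSurrogate(uint32_t cp) {
      return static_cast<uint16_t>(0xDC00 + ((cp - 0x10000) & 0x3FF));
    }

    uint32_t HashStep(uint32_t h, uint16_t unit) {
      h += unit; h += h << 10; h ^= h >> 6;  // one-at-a-time step
      return h;
    }

    uint32_t AddCharacter(uint32_t hash, uint32_t cp) {
      if (cp > kMaxNonSurrogateCharCode) {
        return HashStep(HashStep(hash, LeadSurrogate(cp)), TrailSurrogate(cp));
      }
      return HashStep(hash, static_cast<uint16_t>(cp));
    }
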
diff --git a/src/json-stringifier.h b/src/json-stringifier.h
index 7a8af30..426a370 100644
--- a/src/json-stringifier.h
+++ b/src/json-stringifier.h
@@ -216,7 +216,8 @@
                            factory_->ToObject(factory_->empty_string()));
   part_length_ = kInitialPartLength;
   current_part_ = factory_->NewRawOneByteString(kInitialPartLength);
-  tojson_symbol_ = factory_->LookupAsciiSymbol("toJSON");
+  tojson_symbol_ =
+      factory_->LookupOneByteSymbol(STATIC_ASCII_VECTOR("toJSON"));
   stack_ = factory_->NewJSArray(8);
 }
 
diff --git a/src/list-inl.h b/src/list-inl.h
index 60a033d..7a84313 100644
--- a/src/list-inl.h
+++ b/src/list-inl.h
@@ -85,8 +85,9 @@
 
 template<typename T, class P>
 void List<T, P>::Resize(int new_capacity, P alloc) {
+  ASSERT_LE(length_, new_capacity);
   T* new_data = NewData(new_capacity, alloc);
-  memcpy(new_data, data_, capacity_ * sizeof(T));
+  memcpy(new_data, data_, length_ * sizeof(T));
   List<T, P>::DeleteData(data_);
   data_ = new_data;
   capacity_ = new_capacity;
@@ -162,6 +163,14 @@
 
 
 template<typename T, class P>
+void List<T, P>::Trim(P alloc) {
+  if (length_ < capacity_ / 4) {
+    Resize(capacity_ / 2, alloc);
+  }
+}
+
+
+template<typename T, class P>
 void List<T, P>::Iterate(void (*callback)(T* x)) {
   for (int i = 0; i < length_; i++) callback(&data_[i]);
 }
diff --git a/src/list.h b/src/list.h
index 7fd4f5c..43d982f 100644
--- a/src/list.h
+++ b/src/list.h
@@ -149,6 +149,9 @@
   // Drop the last 'count' elements from the list.
   INLINE(void RewindBy(int count)) { Rewind(length_ - count); }
 
+  // Halve the capacity if fill level is less than a quarter.
+  INLINE(void Trim(AllocationPolicy allocator = AllocationPolicy()));
+
   bool Contains(const T& elm) const;
   int CountOccurrences(const T& elm, int start, int end) const;
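
Trim pairs with the Resize fix above: Resize previously copied capacity_ * sizeof(T) bytes, which would read and write past the new buffer on a shrinking resize, so copying only length_ elements is what makes shrinking safe at all. The quarter/half thresholds leave hysteresis, so an Add/RemoveLast sequence oscillating around a capacity boundary cannot resize on every operation. A sketch, assuming trivially copyable T to match the memcpy:

    #include <cassert>
    #include <cstring>

    template <typename T>
    class SmallList {  // hypothetical pared-down List<T>
     public:
      // Halve the capacity once the fill level drops below a quarter.
      void Trim() {
        if (length_ < capacity_ / 4) Resize(capacity_ / 2);
      }

     private:
      void Resize(int new_capacity) {
        assert(length_ <= new_capacity);  // mirrors the new ASSERT_LE
        T* new_data = new T[new_capacity];
        std::memcpy(new_data, data_, length_ * sizeof(T));  // live elements only
        delete[] data_;
        data_ = new_data;
        capacity_ = new_capacity;
      }

      T* data_ = nullptr;
      int length_ = 0;
      int capacity_ = 0;
    };
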
 
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index 91a9811..b23c867 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -606,7 +606,7 @@
 
 
 int LAllocator::FixedDoubleLiveRangeID(int index) {
-  return -index - 1 - Register::kNumAllocatableRegisters;
+  return -index - 1 - Register::kMaxNumAllocatableRegisters;
 }
 
 
@@ -638,7 +638,7 @@
 
 
 LiveRange* LAllocator::FixedLiveRangeFor(int index) {
-  ASSERT(index < Register::kNumAllocatableRegisters);
+  ASSERT(index < Register::kMaxNumAllocatableRegisters);
   LiveRange* result = fixed_live_ranges_[index];
   if (result == NULL) {
     result = new(zone_) LiveRange(FixedLiveRangeID(index), zone_);
@@ -651,7 +651,7 @@
 
 
 LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
-  ASSERT(index < DoubleRegister::kNumAllocatableRegisters);
+  ASSERT(index < DoubleRegister::NumAllocatableRegisters());
   LiveRange* result = fixed_double_live_ranges_[index];
   if (result == NULL) {
     result = new(zone_) LiveRange(FixedDoubleLiveRangeID(index), zone_);
@@ -768,6 +768,7 @@
 void LAllocator::MeetRegisterConstraints(HBasicBlock* block) {
   int start = block->first_instruction_index();
   int end = block->last_instruction_index();
+  if (start == -1) return;
   for (int i = start; i <= end; ++i) {
     if (IsGapAt(i)) {
       LInstruction* instr = NULL;
@@ -946,8 +947,8 @@
           Define(curr_position, output, NULL);
         }
 
-        if (instr->IsMarkedAsCall()) {
-          for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+        if (instr->ClobbersRegisters()) {
+          for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
             if (output == NULL || !output->IsRegister() ||
                 output->index() != i) {
               LiveRange* range = FixedLiveRangeFor(i);
@@ -958,8 +959,8 @@
           }
         }
 
-        if (instr->IsMarkedAsCall()) {
-          for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+        if (instr->ClobbersDoubleRegisters()) {
+          for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
             if (output == NULL || !output->IsDoubleRegister() ||
                 output->index() != i) {
               LiveRange* range = FixedDoubleLiveRangeFor(i);
@@ -989,7 +990,7 @@
 
         for (TempIterator it(instr); !it.Done(); it.Advance()) {
           LOperand* temp = it.Current();
-          if (instr->IsMarkedAsCall()) {
+          if (instr->ClobbersTemps()) {
             if (temp->IsRegister()) continue;
             if (temp->IsUnallocated()) {
               LUnallocated* temp_unalloc = LUnallocated::cast(temp);
@@ -1324,8 +1325,14 @@
       while (!iterator.Done()) {
         found = true;
         int operand_index = iterator.Current();
-        PrintF("Function: %s\n",
-               *chunk_->info()->function()->debug_name()->ToCString());
+        if (chunk_->info()->IsStub()) {
+          CodeStub::Major major_key = chunk_->info()->code_stub()->MajorKey();
+          PrintF("Function: %s\n", CodeStub::MajorName(major_key, false));
+        } else {
+          ASSERT(chunk_->info()->IsOptimizing());
+          PrintF("Function: %s\n",
+                 *chunk_->info()->function()->debug_name()->ToCString());
+        }
         PrintF("Value %d used before first definition!\n", operand_index);
         LiveRange* range = LiveRangeFor(operand_index);
         PrintF("First use is at %d\n", range->first_pos()->pos().Value());
@@ -1471,14 +1478,14 @@
 
 void LAllocator::AllocateGeneralRegisters() {
   HPhase phase("L_Allocate general registers", this);
-  num_registers_ = Register::kNumAllocatableRegisters;
+  num_registers_ = Register::NumAllocatableRegisters();
   AllocateRegisters();
 }
 
 
 void LAllocator::AllocateDoubleRegisters() {
   HPhase phase("L_Allocate double registers", this);
-  num_registers_ = DoubleRegister::kNumAllocatableRegisters;
+  num_registers_ = DoubleRegister::NumAllocatableRegisters();
   mode_ = DOUBLE_REGISTERS;
   AllocateRegisters();
 }
@@ -1757,14 +1764,14 @@
 
 // TryAllocateFreeReg and AllocateBlockedReg assume this
 // when allocating local arrays.
-STATIC_ASSERT(DoubleRegister::kNumAllocatableRegisters >=
-              Register::kNumAllocatableRegisters);
+STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >=
+              Register::kMaxNumAllocatableRegisters);
 
 
 bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
-  LifetimePosition free_until_pos[DoubleRegister::kNumAllocatableRegisters];
+  LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters];
 
-  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+  for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; i++) {
     free_until_pos[i] = LifetimePosition::MaxPosition();
   }
 
@@ -1853,10 +1860,10 @@
   }
 
 
-  LifetimePosition use_pos[DoubleRegister::kNumAllocatableRegisters];
-  LifetimePosition block_pos[DoubleRegister::kNumAllocatableRegisters];
+  LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+  LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters];
 
-  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
     use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
   }
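
The renames in this file follow one rule: storage and live-range ID math use the compile-time maximum (kMaxNumAllocatableRegisters), while loops that actually hand out registers use the runtime count (NumAllocatableRegisters()), which can be smaller, e.g. for XMM registers on an ia32 without SSE2. A sketch with an assumed register model:

    struct DoubleRegister {  // assumed shape, for illustration only
      static const int kMaxNumAllocatableRegisters = 8;  // compile-time bound
      static int NumAllocatableRegisters() {             // probed at runtime
        return cpu_has_sse2 ? 8 : 1;
      }
      static bool cpu_has_sse2;
    };
    bool DoubleRegister::cpu_has_sse2 = true;

    void ResetFreePositions() {
      // Storage is sized by the maximum so it is valid on every CPU...
      int free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters] = {0};
      // ...but only registers that actually exist are handed out.
      for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
        free_until_pos[i] = 1;  // consider register i for allocation
      }
    }
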
 
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
index 5b05263..0dd192d 100644
--- a/src/lithium-allocator.h
+++ b/src/lithium-allocator.h
@@ -608,9 +608,9 @@
   ZoneList<LiveRange*> live_ranges_;
 
   // Lists of live ranges
-  EmbeddedVector<LiveRange*, Register::kNumAllocatableRegisters>
+  EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters>
       fixed_live_ranges_;
-  EmbeddedVector<LiveRange*, DoubleRegister::kNumAllocatableRegisters>
+  EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters>
       fixed_double_live_ranges_;
   ZoneList<LiveRange*> unhandled_live_ranges_;
   ZoneList<LiveRange*> active_live_ranges_;
diff --git a/src/lithium.cc b/src/lithium.cc
index eb2198d..7ad175e 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -414,7 +414,7 @@
 }
 
 
-Handle<Code> LChunk::Codegen() {
+Handle<Code> LChunk::Codegen(Code::Kind kind) {
   MacroAssembler assembler(info()->isolate(), NULL, 0);
   LCodeGen generator(this, &assembler, info());
 
@@ -425,7 +425,7 @@
       PrintF("Crankshaft Compiler - ");
     }
     CodeGenerator::MakeCodePrologue(info());
-    Code::Flags flags = Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
+    Code::Flags flags = Code::ComputeFlags(kind);
     Handle<Code> code =
         CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
     generator.FinishCode(code);
diff --git a/src/lithium.h b/src/lithium.h
index 089926e..cc2cde0 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -682,7 +682,7 @@
 
   Zone* zone() const { return info_->zone(); }
 
-  Handle<Code> Codegen();
+  Handle<Code> Codegen(Code::Kind kind);
 
  protected:
   LChunk(CompilationInfo* info, HGraph* graph)
diff --git a/src/liveedit.cc b/src/liveedit.cc
index f491e37..a981f47 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -952,11 +952,11 @@
 
       Factory* factory = isolate->factory();
       Handle<String> start_pos_key =
-          factory->LookupAsciiSymbol("startPosition");
+          factory->LookupOneByteSymbol(STATIC_ASCII_VECTOR("startPosition"));
       Handle<String> end_pos_key =
-          factory->LookupAsciiSymbol("endPosition");
+          factory->LookupOneByteSymbol(STATIC_ASCII_VECTOR("endPosition"));
       Handle<String> script_obj_key =
-          factory->LookupAsciiSymbol("scriptObject");
+          factory->LookupOneByteSymbol(STATIC_ASCII_VECTOR("scriptObject"));
       Handle<Smi> start_pos(Smi::FromInt(message_location.start_pos()));
       Handle<Smi> end_pos(Smi::FromInt(message_location.end_pos()));
       Handle<JSValue> script_obj = GetScriptWrapper(message_location.script());
@@ -1223,23 +1223,15 @@
 }
 
 
-class DependentFunctionsDeoptimizingVisitor : public OptimizedFunctionVisitor {
+class DependentFunctionFilter : public OptimizedFunctionFilter {
  public:
-  explicit DependentFunctionsDeoptimizingVisitor(
+  explicit DependentFunctionFilter(
       SharedFunctionInfo* function_info)
       : function_info_(function_info) {}
 
-  virtual void EnterContext(Context* context) {
-  }
-
-  virtual void VisitFunction(JSFunction* function) {
-    if (function->shared() == function_info_ ||
-        IsInlined(function, function_info_)) {
-      Deoptimizer::DeoptimizeFunction(function);
-    }
-  }
-
-  virtual void LeaveContext(Context* context) {
+  virtual bool TakeFunction(JSFunction* function) {
+    return (function->shared() == function_info_ ||
+            IsInlined(function, function_info_));
   }
 
  private:
@@ -1250,8 +1242,8 @@
 static void DeoptimizeDependentFunctions(SharedFunctionInfo* function_info) {
   AssertNoAllocation no_allocation;
 
-  DependentFunctionsDeoptimizingVisitor visitor(function_info);
-  Deoptimizer::VisitAllOptimizedFunctions(&visitor);
+  DependentFunctionFilter filter(function_info);
+  Deoptimizer::DeoptimizeAllFunctionsWith(&filter);
 }
 
 
diff --git a/src/log.cc b/src/log.cc
index d0dc76d..88527ca 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -1457,6 +1457,7 @@
       case Code::BINARY_OP_IC:   // fall through
       case Code::COMPARE_IC:  // fall through
       case Code::TO_BOOLEAN_IC:  // fall through
+      case Code::COMPILED_STUB:  // fall through
       case Code::STUB:
         description =
             CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 8ca14db..adbc5e7 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -835,8 +835,6 @@
   // GC, because it relies on the new address of certain old space
   // objects (empty string, illegal builtin).
   heap()->isolate()->stub_cache()->Clear();
-
-  heap()->external_string_table_.CleanUp();
 }
 
 
@@ -2030,6 +2028,7 @@
   symbol_table->ElementsRemoved(v.PointersRemoved());
   heap()->external_string_table_.Iterate(&v);
   heap()->external_string_table_.CleanUp();
+  heap()->error_object_list_.RemoveUnmarked(heap());
 
   // Process the weak references.
   MarkCompactWeakObjectRetainer mark_compact_object_retainer;
@@ -3069,6 +3068,9 @@
   heap_->UpdateReferencesInExternalStringTable(
       &UpdateReferenceInExternalStringTableEntry);
 
+  // Update pointers in the new error object list.
+  heap_->error_object_list()->UpdateReferences();
+
   if (!FLAG_watch_ic_patching) {
     // Update JSFunction pointers from the runtime profiler.
     heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
diff --git a/src/messages.cc b/src/messages.cc
index ce965fc..801930d 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -61,7 +61,7 @@
     Vector< Handle<Object> > args,
     Handle<String> stack_trace,
     Handle<JSArray> stack_frames) {
-  Handle<String> type_handle = FACTORY->LookupAsciiSymbol(type);
+  Handle<String> type_handle = FACTORY->LookupUtf8Symbol(type);
   Handle<FixedArray> arguments_elements =
       FACTORY->NewFixedArray(args.length());
   for (int i = 0; i < args.length(); i++) {
@@ -149,7 +149,8 @@
 
 
 Handle<String> MessageHandler::GetMessage(Handle<Object> data) {
-  Handle<String> fmt_str = FACTORY->LookupAsciiSymbol("FormatMessage");
+  Handle<String> fmt_str =
+      FACTORY->LookupOneByteSymbol(STATIC_ASCII_VECTOR("FormatMessage"));
   Handle<JSFunction> fun =
       Handle<JSFunction>(
           JSFunction::cast(
@@ -168,7 +169,7 @@
                          &caught_exception);
 
   if (caught_exception || !result->IsString()) {
-    return FACTORY->LookupAsciiSymbol("<error>");
+    return FACTORY->LookupOneByteSymbol(STATIC_ASCII_VECTOR("<error>"));
   }
   Handle<String> result_string = Handle<String>::cast(result);
   // A string that has been obtained from JS code in this way is
diff --git a/src/messages.js b/src/messages.js
index 0a50ae7..24cd66d 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -192,7 +192,7 @@
   if (IS_NULL(obj)) return 'null';
   if (IS_FUNCTION(obj)) return  %_CallFunction(obj, FunctionToString);
   if (IS_OBJECT(obj) && %GetDataProperty(obj, "toString") === ObjectToString) {
-    var constructor = obj.constructor;
+    var constructor = %GetDataProperty(obj, "constructor");
     if (typeof constructor == "function") {
       var constructorName = constructor.name;
       if (IS_STRING(constructorName) && constructorName !== "") {
@@ -820,7 +820,7 @@
        %_CallFunction(this.receiver,
                       ownName,
                       ObjectLookupSetter) === this.fun ||
-       this.receiver[ownName] === this.fun)) {
+       %GetDataProperty(this.receiver, ownName) === this.fun)) {
     // To handle DontEnum properties we guess that the method has
     // the same name as the function.
     return ownName;
@@ -829,8 +829,7 @@
   for (var prop in this.receiver) {
     if (%_CallFunction(this.receiver, prop, ObjectLookupGetter) === this.fun ||
         %_CallFunction(this.receiver, prop, ObjectLookupSetter) === this.fun ||
-        (!%_CallFunction(this.receiver, prop, ObjectLookupGetter) &&
-         this.receiver[prop] === this.fun)) {
+        %GetDataProperty(this.receiver, prop) === this.fun) {
       // If we find more than one match bail out to avoid confusion.
       if (name) {
         return null;
@@ -933,12 +932,14 @@
     var typeName = GetTypeName(this, true);
     var methodName = this.getMethodName();
     if (functionName) {
-      if (typeName && functionName.indexOf(typeName) != 0) {
+      if (typeName &&
+          %_CallFunction(functionName, typeName, StringIndexOf) != 0) {
         line += typeName + ".";
       }
       line += functionName;
-      if (methodName && functionName.lastIndexOf("." + methodName) !=
-          functionName.length - methodName.length - 1) {
+      if (methodName &&
+          (%_CallFunction(functionName, "." + methodName, StringIndexOf) !=
+           functionName.length - methodName.length - 1)) {
         line += " [as " + methodName + "]";
       }
     } else {
@@ -1016,17 +1017,37 @@
   return eval_origin;
 }
 
-function FormatStackTrace(error, frames) {
-  var lines = [];
+
+function FormatErrorString(error) {
   try {
-    lines.push(error.toString());
+    return %_CallFunction(error, ErrorToString);
   } catch (e) {
     try {
-      lines.push("<error: " + e + ">");
+      return "<error: " + e + ">";
     } catch (ee) {
-      lines.push("<error>");
+      return "<error>";
     }
   }
+}
+
+
+function GetStackFrames(raw_stack) {
+  var frames = new InternalArray();
+  for (var i = 0; i < raw_stack.length; i += 4) {
+    var recv = raw_stack[i];
+    var fun = raw_stack[i + 1];
+    var code = raw_stack[i + 2];
+    var pc = raw_stack[i + 3];
+    var pos = %FunctionGetPositionForOffset(code, pc);
+    frames.push(new CallSite(recv, fun, pos));
+  }
+  return frames;
+}
+
+
+function FormatStackTrace(error_string, frames) {
+  var lines = new InternalArray();
+  lines.push(error_string);
   for (var i = 0; i < frames.length; i++) {
     var frame = frames[i];
     var line;
@@ -1042,25 +1063,9 @@
     }
     lines.push("    at " + line);
   }
-  return lines.join("\n");
+  return %_CallFunction(lines, "\n", ArrayJoin);
 }
 
-function FormatRawStackTrace(error, raw_stack) {
-  var frames = [ ];
-  for (var i = 0; i < raw_stack.length; i += 4) {
-    var recv = raw_stack[i];
-    var fun = raw_stack[i + 1];
-    var code = raw_stack[i + 2];
-    var pc = raw_stack[i + 3];
-    var pos = %FunctionGetPositionForOffset(code, pc);
-    frames.push(new CallSite(recv, fun, pos));
-  }
-  if (IS_FUNCTION($Error.prepareStackTrace)) {
-    return $Error.prepareStackTrace(error, frames);
-  } else {
-    return FormatStackTrace(error, frames);
-  }
-}
 
 function GetTypeName(obj, requireConstructor) {
   var constructor = obj.receiver.constructor;
@@ -1076,23 +1081,51 @@
   return constructorName;
 }
 
+
+// Flag to prevent recursive call of Error.prepareStackTrace.
+var formatting_custom_stack_trace = false;
+
+
 function captureStackTrace(obj, cons_opt) {
   var stackTraceLimit = $Error.stackTraceLimit;
   if (!stackTraceLimit || !IS_NUMBER(stackTraceLimit)) return;
   if (stackTraceLimit < 0 || stackTraceLimit > 10000) {
     stackTraceLimit = 10000;
   }
-  var raw_stack = %CollectStackTrace(obj,
-                                     cons_opt ? cons_opt : captureStackTrace,
-                                     stackTraceLimit);
+  var stack = %CollectStackTrace(obj,
+                                 cons_opt ? cons_opt : captureStackTrace,
+                                 stackTraceLimit);
+
+  // Don't be lazy if the error stack formatting is custom (observable).
+  if (IS_FUNCTION($Error.prepareStackTrace) && !formatting_custom_stack_trace) {
+    var array = [];
+    %MoveArrayContents(GetStackFrames(stack), array);
+    formatting_custom_stack_trace = true;
+    try {
+      obj.stack = $Error.prepareStackTrace(obj, array);
+    } catch (e) {
+      throw e;  // The custom formatting function threw.  Rethrow.
+    } finally {
+      formatting_custom_stack_trace = false;
+    }
+    return;
+  }
+
+  var error_string = FormatErrorString(obj);
   // Note that 'obj' and 'this' may be different when called on objects that
   // have the error object on their prototype chain.  The getter replaces
   // itself with a data property as soon as the stack trace has been formatted.
+  // The getter must not change the object layout as it may be called after GC.
   var getter = function() {
-    var value = FormatRawStackTrace(obj, raw_stack);
-    %DefineOrRedefineDataProperty(obj, 'stack', value, NONE);
-    return value;
+    if (IS_STRING(stack)) return stack;
+    // Stack is still a raw array awaiting formatting.
+    stack = FormatStackTrace(error_string, GetStackFrames(stack));
+    // Release context value.
+    error_string = void 0;
+    return stack;
   };
+  %MarkOneShotGetter(getter);
+
   // The 'stack' property of the receiver is set as data property.  If
   // the receiver is the same as holder, this accessor pair is replaced.
   var setter = function(v) {
@@ -1239,23 +1272,32 @@
   // error object copy, but can be found on the prototype chain of 'this'.
   // When the stack trace is formatted, this accessor property is replaced by
   // a data property.
+  var error_string = boilerplate.name + ": " + boilerplate.message;
+
+  // The getter must not change the object layout as it may be called after GC.
   function getter() {
     var holder = this;
     while (!IS_ERROR(holder)) {
       holder = %GetPrototype(holder);
       if (holder == null) return MakeSyntaxError('illegal_access', []);
     }
-    var raw_stack = %GetOverflowedRawStackTrace(holder);
-    var result = IS_ARRAY(raw_stack) ? FormatRawStackTrace(holder, raw_stack)
-                                     : void 0;
-    %DefineOrRedefineDataProperty(holder, 'stack', result, NONE);
-    return result;
+    var stack = %GetOverflowedStackTrace(holder);
+    if (IS_STRING(stack)) return stack;
+    if (IS_ARRAY(stack)) {
+      var result = FormatStackTrace(error_string, GetStackFrames(stack));
+      %SetOverflowedStackTrace(holder, result);
+      return result;
+    }
+    return void 0;
   }
+  %MarkOneShotGetter(getter);
 
   // The 'stack' property of the receiver is set as data property.  If
   // the receiver is the same as holder, this accessor pair is replaced.
   function setter(v) {
     %DefineOrRedefineDataProperty(this, 'stack', v, NONE);
+    // Release the stack trace that is stored as a hidden property, if any.
+    %SetOverflowedStackTrace(this, void 0);
   }
 
   %DefineOrRedefineAccessorProperty(
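
The messages.js changes above split stack trace handling into three parts:
GetStackFrames builds CallSite objects from the raw frame array,
FormatErrorString produces the header line, and FormatStackTrace joins the
two. Formatting stays lazy (a one-shot 'stack' getter) unless a custom
Error.prepareStackTrace is installed, in which case it runs eagerly because
it is observable, guarded against recursion by formatting_custom_stack_trace.
A minimal sketch of the user-facing hook, assuming d8's print and the
standard CallSite accessors getFunctionName and getLineNumber:

    Error.prepareStackTrace = function(error, frames) {
      // 'frames' is an array of CallSite objects (see GetStackFrames).
      return frames.map(function(frame) {
        return frame.getFunctionName() + ' (line ' +
               frame.getLineNumber() + ')';
      }).join('\n');
    };
    try {
      missing();  // throws ReferenceError
    } catch (e) {
      print(e.stack);  // already formatted eagerly via prepareStackTrace
    }

Without the hook, the closure variable 'stack' keeps the raw frame array and
FormatStackTrace runs only when the getter is first invoked.
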
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index 0119c11..1da8089 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -246,7 +246,7 @@
                       HeapObject::kMapOffset,
                       a3,
                       t5,
-                      kRAHasBeenSaved,
+                      kRAHasNotBeenSaved,
                       kDontSaveFPRegs,
                       OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
@@ -517,6 +517,50 @@
 }
 
 
+void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
+                                         String::Encoding encoding,
+                                         Register string,
+                                         Register index,
+                                         Register value) {
+  if (FLAG_debug_code) {
+    __ And(at, index, Operand(kSmiTagMask));
+    __ Check(eq, "Non-smi index", at, Operand(zero_reg));
+    __ And(at, value, Operand(kSmiTagMask));
+    __ Check(eq, "Non-smi value", at, Operand(zero_reg));
+
+    __ lw(at, FieldMemOperand(string, String::kLengthOffset));
+    __ Check(lt, "Index is too large", at, Operand(index));
+
+    __ Check(ge, "Index is negative", index, Operand(Smi::FromInt(0)));
+
+    __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
+    __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
+
+    __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    __ Check(eq, "Unexpected string type", at,
+        Operand(encoding == String::ONE_BYTE_ENCODING
+                    ? one_byte_seq_type : two_byte_seq_type));
+  }
+
+  __ Addu(at,
+          string,
+          Operand(SeqString::kHeaderSize - kHeapObjectTag));
+  __ SmiUntag(value);
+  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ SmiUntag(index);
+    __ Addu(at, at, index);
+    __ sb(value, MemOperand(at));
+  } else {
+    // No need to untag a smi for two-byte addressing.
+    __ Addu(at, at, index);
+    __ sh(value, MemOperand(at));
+  }
+}
+
+
 static MemOperand ExpConstant(int index, Register base) {
   return MemOperand(base, index * kDoubleSize);
 }
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index 0ed2414..13b0b43 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -70,6 +70,8 @@
                               int pos,
                               bool right_here = false);
 
+  DEFINE_AST_VISITOR_SUBCLASS_METHODS();
+
  private:
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index e8ed9cc..a134f59 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -42,11 +42,14 @@
 }
 
 
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
-  HandleScope scope;
+void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
+    JSFunction* function) {
+  Isolate* isolate = function->GetIsolate();
+  HandleScope scope(isolate);
   AssertNoAllocation no_allocation;
 
-  if (!function->IsOptimized()) return;
+  ASSERT(function->IsOptimized());
+  ASSERT(function->FunctionsInFunctionListShareSameCode());
 
   // The optimized code is going to be patched, so we cannot use it
   // any more.  Play safe and reset the whole cache.
@@ -87,8 +90,6 @@
 #endif
   }
 
-  Isolate* isolate = code->GetIsolate();
-
   // Add the deoptimizing code to the list.
   DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
   DeoptimizerData* data = isolate->deoptimizer_data();
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index f9f8c40..5bc5162 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -2047,7 +2047,7 @@
 
 
 void FullCodeGenerator::EmitAssignment(Expression* expr) {
-  // Invalid left-hand sides are rewritten to have a 'throw
+  // Invalid left-hand sides are rewritten by the parser to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
     VisitForEffect(expr);
@@ -3146,6 +3146,38 @@
 }
 
 
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT_EQ(3, args->length());
+
+  VisitForStackValue(args->at(1));  // index
+  VisitForStackValue(args->at(2));  // value
+  __ pop(a2);
+  __ pop(a1);
+  VisitForAccumulatorValue(args->at(0));  // string
+
+  static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
+  SeqStringSetCharGenerator::Generate(masm_, encoding, v0, a1, a2);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT_EQ(3, args->length());
+
+  VisitForStackValue(args->at(1));  // index
+  VisitForStackValue(args->at(2));  // value
+  __ pop(a2);
+  __ pop(a1);
+  VisitForAccumulatorValue(args->at(0));  // string
+
+  static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
+  SeqStringSetCharGenerator::Generate(masm_, encoding, v0, a1, a2);
+  context()->Plug(v0);
+}
+
+
 void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   // Load the arguments on the stack and call the runtime function.
   ZoneList<Expression*>* args = expr->arguments();
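
EmitOneByteSeqStringSetChar and EmitTwoByteSeqStringSetChar lower the
%_OneByteSeqStringSetChar and %_TwoByteSeqStringSetChar intrinsics; the
operand loads above imply the call order (string, index, value), with index
and value passed as smis. A hedged illustration (d8 with
--allow-natives-syntax; ordinary JS strings are immutable to user code, so
the intrinsic is intended for freshly allocated sequential strings inside
internal library code):

    // Hypothetical: assume s is a freshly allocated sequential one-byte
    // string.  Debug builds Check() that index and value are smis, that
    // the index is in bounds, and that the string has the expected
    // sequential encoding.
    %_OneByteSeqStringSetChar(s, 0, 0x7a);  // store 'z' at index 0
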
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 22352e1..fd7af9f 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -1389,6 +1389,15 @@
 }
 
 
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+  SeqStringSetCharGenerator::Generate(masm(),
+                                      instr->encoding(),
+                                      ToRegister(instr->string()),
+                                      ToRegister(instr->index()),
+                                      ToRegister(instr->value()));
+}
+
+
 void LCodeGen::DoBitNotI(LBitNotI* instr) {
   Register input = ToRegister(instr->value());
   Register result = ToRegister(instr->result());
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index 56dd33d..521b38d 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -1531,6 +1531,16 @@
 }
 
 
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+  LOperand* string = UseRegister(instr->string());
+  LOperand* index = UseRegister(instr->index());
+  LOperand* value = UseRegister(instr->value());
+  LSeqStringSetChar* result =
+      new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+  return DefineAsRegister(result);
+}
+
+
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
   LOperand* value = UseRegisterOrConstantAtStart(instr->index());
   LOperand* length = UseRegister(instr->length());
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index 17ef24c..b2ed72a 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -148,6 +148,7 @@
   V(Random)                                     \
   V(RegExpLiteral)                              \
   V(Return)                                     \
+  V(SeqStringSetChar)                           \
   V(ShiftI)                                     \
   V(SmiTag)                                     \
   V(SmiUntag)                                   \
@@ -1143,6 +1144,30 @@
 };
 
 
+class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
+ public:
+  LSeqStringSetChar(String::Encoding encoding,
+                    LOperand* string,
+                    LOperand* index,
+                    LOperand* value) : encoding_(encoding) {
+    inputs_[0] = string;
+    inputs_[1] = index;
+    inputs_[2] = value;
+  }
+
+  String::Encoding encoding() { return encoding_; }
+  LOperand* string() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+
+ private:
+  String::Encoding encoding_;
+};
+
+
 class LThrow: public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LThrow(LOperand* value) {
diff --git a/src/object-observe.js b/src/object-observe.js
index 8c2895f..d0a84f6 100644
--- a/src/object-observe.js
+++ b/src/object-observe.js
@@ -82,14 +82,11 @@
   }
 
   var objectInfo = objectInfoMap.get(object);
-  if (IS_UNDEFINED(objectInfo)) {
-    objectInfo = CreateObjectInfo(object);
-    %SetIsObserved(object, true);
-  }
+  if (IS_UNDEFINED(objectInfo)) objectInfo = CreateObjectInfo(object);
+  %SetIsObserved(object, true);
 
   var changeObservers = objectInfo.changeObservers;
-  if (changeObservers.indexOf(callback) < 0)
-    changeObservers.push(callback);
+  if (changeObservers.indexOf(callback) < 0) changeObservers.push(callback);
 
   return object;
 }
@@ -106,8 +103,10 @@
 
   var changeObservers = objectInfo.changeObservers;
   var index = changeObservers.indexOf(callback);
-  if (index >= 0)
+  if (index >= 0) {
     changeObservers.splice(index, 1);
+    if (changeObservers.length === 0) %SetIsObserved(object, false);
+  }
 
   return object;
 }
@@ -144,24 +143,16 @@
   var target = notifierTargetMap.get(this);
   if (IS_UNDEFINED(target))
     throw MakeTypeError("observe_notify_non_notifier");
-
   if (!IS_STRING(changeRecord.type))
     throw MakeTypeError("observe_type_non_string");
 
   var objectInfo = objectInfoMap.get(target);
-  if (IS_UNDEFINED(objectInfo))
+  if (IS_UNDEFINED(objectInfo) || objectInfo.changeObservers.length === 0)
     return;
 
-  if (!objectInfo.changeObservers.length)
-    return;
-
-  var newRecord = {
-    object: target
-  };
+  var newRecord = { object: target };
   for (var prop in changeRecord) {
-    if (prop === 'object')
-      continue;
-
+    if (prop === 'object') continue;
     %DefineOrRedefineDataProperty(newRecord, prop, changeRecord[prop],
         READ_ONLY + DONT_DELETE);
   }
@@ -174,17 +165,13 @@
   if (!IS_SPEC_OBJECT(object))
     throw MakeTypeError("observe_non_object", ["getNotifier"]);
 
-  if (ObjectIsFrozen(object))
-    return null;
+  if (ObjectIsFrozen(object)) return null;
 
   var objectInfo = objectInfoMap.get(object);
-  if (IS_UNDEFINED(objectInfo))
-    objectInfo = CreateObjectInfo(object);
+  if (IS_UNDEFINED(objectInfo)) objectInfo = CreateObjectInfo(object);
 
   if (IS_NULL(objectInfo.notifier)) {
-    objectInfo.notifier = {
-      __proto__: notifierPrototype
-    };
+    objectInfo.notifier = { __proto__: notifierPrototype };
     notifierTargetMap.set(objectInfo.notifier, object);
   }
 
@@ -194,11 +181,11 @@
 function DeliverChangeRecordsForObserver(observer) {
   var observerInfo = observerInfoMap.get(observer);
   if (IS_UNDEFINED(observerInfo))
-    return;
+    return false;
 
   var pendingChangeRecords = observerInfo.pendingChangeRecords;
   if (IS_NULL(pendingChangeRecords))
-    return;
+    return false;
 
   observerInfo.pendingChangeRecords = null;
   delete observationState.pendingObservers[observerInfo.priority];
@@ -207,13 +194,14 @@
   try {
     %Call(void 0, delivered, observer);
   } catch (ex) {}
+  return true;
 }
 
 function ObjectDeliverChangeRecords(callback) {
   if (!IS_SPEC_FUNCTION(callback))
     throw MakeTypeError("observe_non_function", ["deliverChangeRecords"]);
 
-  DeliverChangeRecordsForObserver(callback);
+  while (DeliverChangeRecordsForObserver(callback)) {}
 }
 
 function DeliverChangeRecords() {
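
DeliverChangeRecordsForObserver now reports whether anything was delivered,
and ObjectDeliverChangeRecords loops until the observer's queue is empty, so
records enqueued during delivery are delivered in the same call. A small
sketch of the observable difference:

    var obj = {};
    var deliveries = 0;
    function observer(records) {
      deliveries++;
      if (deliveries === 1) obj.second = true;  // enqueues another record
    }
    Object.observe(obj, observer);
    obj.first = true;
    Object.deliverChangeRecords(observer);
    // The loop drains the queue, so the record for 'second' is delivered
    // too and deliveries === 2; previously only one batch was delivered.

The Object.unobserve change is also visible here: once the last observer is
removed, %SetIsObserved(object, false) clears the map's is_observed flag.
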
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index e3226c1..5045e1e 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -311,6 +311,9 @@
     SLOW_ASSERT(transitions()->IsSortedNoDuplicates());
     SLOW_ASSERT(transitions()->IsConsistentWithBackPointers(this));
   }
+  ASSERT(!is_observed() || instance_type() < FIRST_JS_OBJECT_TYPE ||
+         instance_type() > LAST_JS_OBJECT_TYPE ||
+         has_slow_elements_kind() || has_external_array_elements());
 }
 
 
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 67db5b4..7772cfe 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1416,16 +1416,29 @@
 
 
 MaybeObject* JSObject::ResetElements() {
-  Object* obj;
+  if (map()->is_observed()) {
+    // Maintain invariant that observed elements are always in dictionary mode.
+    SeededNumberDictionary* dictionary;
+    MaybeObject* maybe = SeededNumberDictionary::Allocate(0);
+    if (!maybe->To(&dictionary)) return maybe;
+    if (map() == GetHeap()->non_strict_arguments_elements_map()) {
+      FixedArray::cast(elements())->set(1, dictionary);
+    } else {
+      set_elements(dictionary);
+    }
+    return this;
+  }
+
   ElementsKind elements_kind = GetInitialFastElementsKind();
   if (!FLAG_smi_only_arrays) {
     elements_kind = FastSmiToObjectElementsKind(elements_kind);
   }
-  MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
-                                                    elements_kind);
-  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  set_map(Map::cast(obj));
+  MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(), elements_kind);
+  Map* map;
+  if (!maybe->To(&map)) return maybe;
+  set_map(map);
   initialize_elements();
+
   return this;
 }
 
@@ -2521,14 +2534,12 @@
     String* string,
     unsigned offset,
     Visitor& visitor,
-    ConsOp& consOp,
+    ConsOp& cons_op,
     int32_t type,
     unsigned length) {
-
   ASSERT(length == static_cast<unsigned>(string->length()));
   ASSERT(offset <= length);
-
-  unsigned sliceOffset = offset;
+  unsigned slice_offset = offset;
   while (true) {
     ASSERT(type == string->map()->instance_type());
 
@@ -2536,35 +2547,36 @@
       case kSeqStringTag | kOneByteStringTag:
         visitor.VisitOneByteString(
             reinterpret_cast<const uint8_t*>(
-                SeqOneByteString::cast(string)->GetChars()) + sliceOffset,
+                SeqOneByteString::cast(string)->GetChars()) + slice_offset,
                 length - offset);
         return;
 
       case kSeqStringTag | kTwoByteStringTag:
         visitor.VisitTwoByteString(
             reinterpret_cast<const uint16_t*>(
-                SeqTwoByteString::cast(string)->GetChars()) + sliceOffset,
+                SeqTwoByteString::cast(string)->GetChars()) + slice_offset,
                 length - offset);
         return;
 
       case kExternalStringTag | kOneByteStringTag:
         visitor.VisitOneByteString(
             reinterpret_cast<const uint8_t*>(
-                ExternalAsciiString::cast(string)->GetChars()) + sliceOffset,
+                ExternalAsciiString::cast(string)->GetChars()) + slice_offset,
                 length - offset);
         return;
 
       case kExternalStringTag | kTwoByteStringTag:
         visitor.VisitTwoByteString(
             reinterpret_cast<const uint16_t*>(
-                ExternalTwoByteString::cast(string)->GetChars()) + sliceOffset,
+                ExternalTwoByteString::cast(string)->GetChars())
+                    + slice_offset,
                 length - offset);
         return;
 
       case kSlicedStringTag | kOneByteStringTag:
       case kSlicedStringTag | kTwoByteStringTag: {
         SlicedString* slicedString = SlicedString::cast(string);
-        sliceOffset += slicedString->offset();
+        slice_offset += slicedString->offset();
         string = slicedString->parent();
         type = string->map()->instance_type();
         continue;
@@ -2572,10 +2584,9 @@
 
       case kConsStringTag | kOneByteStringTag:
       case kConsStringTag | kTwoByteStringTag:
-        string = consOp.Operate(ConsString::cast(string), &offset, &type,
-            &length);
+        string = cons_op.Operate(string, &offset, &type, &length);
         if (string == NULL) return;
-        sliceOffset = offset;
+        slice_offset = offset;
         ASSERT(length == static_cast<unsigned>(string->length()));
         continue;
 
@@ -2765,39 +2776,24 @@
 }
 
 
+String* ConsStringNullOp::Operate(String*, unsigned*, int32_t*, unsigned*) {
+  return NULL;
+}
+
+
 unsigned ConsStringIteratorOp::OffsetForDepth(unsigned depth) {
   return depth & kDepthMask;
 }
 
 
-uint32_t ConsStringIteratorOp::MaskForDepth(unsigned depth) {
-  return 1 << OffsetForDepth(depth);
-}
-
-
-void ConsStringIteratorOp::SetRightDescent() {
-  trace_ |= MaskForDepth(depth_ - 1);
-}
-
-
-void ConsStringIteratorOp::ClearRightDescent() {
-  trace_ &= ~MaskForDepth(depth_ - 1);
-}
-
-
 void ConsStringIteratorOp::PushLeft(ConsString* string) {
   frames_[depth_++ & kDepthMask] = string;
 }
 
 
-void ConsStringIteratorOp::PushRight(ConsString* string, int32_t type) {
-  // Inplace update
+void ConsStringIteratorOp::PushRight(ConsString* string) {
+  // In-place update.
   frames_[(depth_-1) & kDepthMask] = string;
-  if (depth_ != 1) return;
-  // Optimization: can replace root in this case.
-  root_ = string;
-  root_type_ = type;
-  root_length_ = string->length();
 }
 
 
@@ -2813,78 +2809,72 @@
 }
 
 
-void ConsStringIteratorOp::Reset() {
-  consumed_ = 0;
-  ResetStack();
-}
-
-
 bool ConsStringIteratorOp::HasMore() {
   return depth_ != 0;
 }
 
 
-void ConsStringIteratorOp::ResetStack() {
+void ConsStringIteratorOp::Reset() {
   depth_ = 0;
-  maximum_depth_ = 0;
 }
 
 
-bool ConsStringIteratorOp::ContinueOperation(ContinueResponse* response) {
-  bool blewStack;
-  int32_t type;
-  String* string = NextLeaf(&blewStack, &type);
+String* ConsStringIteratorOp::ContinueOperation(int32_t* type_out,
+                                                unsigned* length_out) {
+  bool blew_stack = false;
+  String* string = NextLeaf(&blew_stack, type_out, length_out);
   // String found.
   if (string != NULL) {
-    unsigned length = string->length();
-    consumed_ += length;
-    response->string_ = string;
-    response->offset_ = 0;
-    response->length_ = length;
-    response->type_ = type;
-    return true;
+    // Verify output.
+    ASSERT(*length_out == static_cast<unsigned>(string->length()));
+    ASSERT(*type_out == string->map()->instance_type());
+    return string;
   }
   // Traversal complete.
-  if (!blewStack) return false;
-  // Restart search.
-  ResetStack();
-  response->string_ = root_;
-  response->offset_ = consumed_;
-  response->length_ = root_length_;
-  response->type_ = root_type_;
-  return true;
+  if (!blew_stack) return NULL;
+  // Restart search from root.
+  unsigned offset_out;
+  string = Search(&offset_out, type_out, length_out);
+  // Verify output.
+  ASSERT(string == NULL || offset_out == 0);
+  ASSERT(string == NULL ||
+         *length_out == static_cast<unsigned>(string->length()));
+  ASSERT(string == NULL || *type_out == string->map()->instance_type());
+  return string;
 }
 
 
 uint16_t StringCharacterStream::GetNext() {
-  ASSERT(buffer8_ != NULL);
+  ASSERT((buffer8_ == NULL && end_ == NULL) || buffer8_ < end_);
   return is_one_byte_ ? *buffer8_++ : *buffer16_++;
 }
 
 
 StringCharacterStream::StringCharacterStream(
     String* string, unsigned offset, ConsStringIteratorOp* op)
-  : is_one_byte_(true),
+  : is_one_byte_(false),
     buffer8_(NULL),
     end_(NULL),
     op_(op) {
   op->Reset();
-  String::Visit(string,
-      offset, *this, *op, string->map()->instance_type(), string->length());
+  int32_t type = string->map()->instance_type();
+  unsigned length = string->length();
+  String::Visit(string, offset, *this, *op, type, length);
 }
 
 
 bool StringCharacterStream::HasMore() {
   if (buffer8_ != end_) return true;
   if (!op_->HasMore()) return false;
-  ConsStringIteratorOp::ContinueResponse response;
-  // This has been checked above
-  if (!op_->ContinueOperation(&response)) {
-    UNREACHABLE();
-    return false;
-  }
-  String::Visit(response.string_,
-      response.offset_, *this, *op_, response.type_, response.length_);
+  unsigned length;
+  int32_t type;
+  String* string = op_->ContinueOperation(&type, &length);
+  if (string == NULL) return false;
+  ASSERT(!string->IsConsString());
+  ASSERT(string->length() != 0);
+  ConsStringNullOp null_op;
+  String::Visit(string, 0, *this, null_op, type, length);
+  ASSERT(buffer8_ != end_);
   return true;
 }
 
@@ -3384,6 +3374,9 @@
 
 
 void Map::set_is_observed(bool is_observed) {
+  ASSERT(instance_type() < FIRST_JS_OBJECT_TYPE ||
+         instance_type() > LAST_JS_OBJECT_TYPE ||
+         has_slow_elements_kind() || has_external_array_elements());
   set_bit_field3(IsObserved::update(bit_field3(), is_observed));
 }
 
@@ -3440,6 +3433,7 @@
 
 int Code::major_key() {
   ASSERT(kind() == STUB ||
+         kind() == COMPILED_STUB ||
          kind() == UNARY_OP_IC ||
          kind() == BINARY_OP_IC ||
          kind() == COMPARE_IC ||
@@ -3451,6 +3445,7 @@
 
 void Code::set_major_key(int major) {
   ASSERT(kind() == STUB ||
+         kind() == COMPILED_STUB ||
          kind() == UNARY_OP_IC ||
          kind() == BINARY_OP_IC ||
          kind() == COMPARE_IC ||
@@ -3559,7 +3554,7 @@
 
 
 unsigned Code::stack_slots() {
-  ASSERT(kind() == OPTIMIZED_FUNCTION);
+  ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
   return StackSlotsField::decode(
       READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
 }
@@ -3567,7 +3562,7 @@
 
 void Code::set_stack_slots(unsigned slots) {
   CHECK(slots <= (1 << kStackSlotsBitCount));
-  ASSERT(kind() == OPTIMIZED_FUNCTION);
+  ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
   int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
   int updated = StackSlotsField::update(previous, slots);
   WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
@@ -3575,7 +3570,7 @@
 
 
 unsigned Code::safepoint_table_offset() {
-  ASSERT(kind() == OPTIMIZED_FUNCTION);
+  ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
   return SafepointTableOffsetField::decode(
       READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
 }
@@ -3583,7 +3578,7 @@
 
 void Code::set_safepoint_table_offset(unsigned offset) {
   CHECK(offset <= (1 << kSafepointTableOffsetBitCount));
-  ASSERT(kind() == OPTIMIZED_FUNCTION);
+  ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
   ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
   int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
   int updated = SafepointTableOffsetField::update(previous, offset);
@@ -4103,6 +4098,7 @@
 SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
 
 
+SMI_ACCESSORS(FunctionTemplateInfo, length, kLengthOffset)
 BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
                kHiddenPrototypeBit)
 BOOL_ACCESSORS(FunctionTemplateInfo, flag, undetectable, kUndetectableBit)
@@ -4824,6 +4820,18 @@
 }
 
 
+void Code::set_deoptimizing_functions(Object* value) {
+  ASSERT(kind() == OPTIMIZED_FUNCTION);
+  WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
+}
+
+
+Object* Code::deoptimizing_functions() {
+  ASSERT(kind() == OPTIMIZED_FUNCTION);
+  return Object::cast(READ_FIELD(this, kTypeFeedbackInfoOffset));
+}
+
+
 ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset)
 INT_ACCESSORS(Code, ic_age, kICAgeOffset)
 
@@ -5136,7 +5144,7 @@
 }
 
 
-uint32_t StringHasher::AddCharacterCore(uint32_t running_hash, uint32_t c) {
+uint32_t StringHasher::AddCharacterCore(uint32_t running_hash, uint16_t c) {
   running_hash += c;
   running_hash += (running_hash << 10);
   running_hash ^= (running_hash >> 6);
@@ -5155,66 +5163,62 @@
 }
 
 
-void StringHasher::AddCharacter(uint32_t c) {
-  if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
-    AddSurrogatePair(c);  // Not inlined.
-    return;
-  }
+void StringHasher::AddCharacter(uint16_t c) {
   // Use the Jenkins one-at-a-time hash function to update the hash
   // for the given character.
   raw_running_hash_ = AddCharacterCore(raw_running_hash_, c);
-  // Incremental array index computation.
-  if (is_array_index_) {
-    if (c < '0' || c > '9') {
+}
+
+
+bool StringHasher::UpdateIndex(uint16_t c) {
+  ASSERT(is_array_index_);
+  if (c < '0' || c > '9') {
+    is_array_index_ = false;
+    return false;
+  }
+  int d = c - '0';
+  if (is_first_char_) {
+    is_first_char_ = false;
+    if (c == '0' && length_ > 1) {
       is_array_index_ = false;
-    } else {
-      int d = c - '0';
-      if (is_first_char_) {
-        is_first_char_ = false;
-        if (c == '0' && length_ > 1) {
-          is_array_index_ = false;
-          return;
-        }
-      }
-      if (array_index_ > 429496729U - ((d + 2) >> 3)) {
-        is_array_index_ = false;
-      } else {
-        array_index_ = array_index_ * 10 + d;
+      return false;
+    }
+  }
+  if (array_index_ > 429496729U - ((d + 2) >> 3)) {
+    is_array_index_ = false;
+    return false;
+  }
+  array_index_ = array_index_ * 10 + d;
+  return true;
+}
+
+
+template<typename Char>
+inline void StringHasher::AddCharacters(const Char* chars, int length) {
+  ASSERT(sizeof(Char) == 1 || sizeof(Char) == 2);
+  int i = 0;
+  if (is_array_index_) {
+    for (; i < length; i++) {
+      AddCharacter(chars[i]);
+      if (!UpdateIndex(chars[i])) {
+        i++;
+        break;
       }
     }
   }
-}
-
-
-void StringHasher::AddCharacterNoIndex(uint32_t c) {
-  ASSERT(!is_array_index());
-  if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
-    AddSurrogatePairNoIndex(c);  // Not inlined.
-    return;
+  for (; i < length; i++) {
+    ASSERT(!is_array_index_);
+    AddCharacter(chars[i]);
   }
-  raw_running_hash_ = AddCharacterCore(raw_running_hash_, c);
-}
-
-
-uint32_t StringHasher::GetHash() {
-  // Get the calculated raw hash value and do some more bit ops to distribute
-  // the hash further. Ensure that we never return zero as the hash value.
-  return GetHashCore(raw_running_hash_);
 }
 
 
 template <typename schar>
-uint32_t HashSequentialString(const schar* chars, int length, uint32_t seed) {
+uint32_t StringHasher::HashSequentialString(const schar* chars,
+                                            int length,
+                                            uint32_t seed) {
   StringHasher hasher(length, seed);
-  if (!hasher.has_trivial_hash()) {
-    int i;
-    for (i = 0; hasher.is_array_index() && (i < length); i++) {
-      hasher.AddCharacter(chars[i]);
-    }
-    for (; i < length; i++) {
-      hasher.AddCharacterNoIndex(chars[i]);
-    }
-  }
+  if (!hasher.has_trivial_hash()) hasher.AddCharacters(chars, length);
   return hasher.GetHashField();
 }
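
AddCharacterCore above is the per-character step of the Jenkins
one-at-a-time hash; AddCharacters runs the array-index tracking
(UpdateIndex) only while the prefix can still be an array index, then falls
back to plain hashing, the same split the deleted HashSequentialString loop
did inline. A plain-JS sketch of the whole hash, assuming GetHashCore (not
shown in this hunk) applies the standard one-at-a-time finalization and
ignoring V8's length and non-zero adjustments:

    function oneAtATimeHash(str, seed) {
      var h = seed >>> 0;
      for (var i = 0; i < str.length; i++) {
        h = (h + str.charCodeAt(i)) >>> 0;  // running_hash += c
        h = (h + (h << 10)) >>> 0;          // running_hash += hash << 10
        h = (h ^ (h >>> 6)) >>> 0;          // running_hash ^= hash >> 6
      }
      h = (h + (h << 3)) >>> 0;             // assumed finalization steps
      h = (h ^ (h >>> 11)) >>> 0;
      h = (h + (h << 15)) >>> 0;
      return h;
    }
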
 
diff --git a/src/objects.cc b/src/objects.cc
index 885a5dd..e1d6da6 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1753,7 +1753,7 @@
                                    Handle<Object> old_value) {
   Isolate* isolate = object->GetIsolate();
   HandleScope scope;
-  Handle<String> type = isolate->factory()->LookupAsciiSymbol(type_str);
+  Handle<String> type = isolate->factory()->LookupUtf8Symbol(type_str);
   if (object->IsJSGlobalObject()) {
     object = handle(JSGlobalObject::cast(*object)->global_receiver(), isolate);
   }
@@ -2656,15 +2656,16 @@
   if (has_pending_exception) return Failure::Exception();
 
   // [[GetProperty]] requires to check that all properties are configurable.
-  Handle<String> configurable_name =
-      isolate->factory()->LookupAsciiSymbol("configurable_");
+  Handle<String> configurable_name = isolate->factory()->LookupOneByteSymbol(
+      STATIC_ASCII_VECTOR("configurable_"));
   Handle<Object> configurable(
       v8::internal::GetProperty(desc, configurable_name));
   ASSERT(!isolate->has_pending_exception());
   ASSERT(configurable->IsTrue() || configurable->IsFalse());
   if (configurable->IsFalse()) {
     Handle<String> trap =
-        isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
+        isolate->factory()->LookupOneByteSymbol(
+            STATIC_ASCII_VECTOR("getPropertyDescriptor"));
     Handle<Object> args[] = { handler, trap, name };
     Handle<Object> error = isolate->factory()->NewTypeError(
         "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
@@ -2674,13 +2675,15 @@
 
   // Check for DataDescriptor.
   Handle<String> hasWritable_name =
-      isolate->factory()->LookupAsciiSymbol("hasWritable_");
+      isolate->factory()->LookupOneByteSymbol(
+          STATIC_ASCII_VECTOR("hasWritable_"));
   Handle<Object> hasWritable(v8::internal::GetProperty(desc, hasWritable_name));
   ASSERT(!isolate->has_pending_exception());
   ASSERT(hasWritable->IsTrue() || hasWritable->IsFalse());
   if (hasWritable->IsTrue()) {
     Handle<String> writable_name =
-        isolate->factory()->LookupAsciiSymbol("writable_");
+        isolate->factory()->LookupOneByteSymbol(
+            STATIC_ASCII_VECTOR("writable_"));
     Handle<Object> writable(v8::internal::GetProperty(desc, writable_name));
     ASSERT(!isolate->has_pending_exception());
     ASSERT(writable->IsTrue() || writable->IsFalse());
@@ -2694,7 +2697,8 @@
   }
 
   // We have an AccessorDescriptor.
-  Handle<String> set_name = isolate->factory()->LookupAsciiSymbol("set_");
+  Handle<String> set_name = isolate->factory()->LookupOneByteSymbol(
+      STATIC_ASCII_VECTOR("set_"));
   Handle<Object> setter(v8::internal::GetProperty(desc, set_name));
   ASSERT(!isolate->has_pending_exception());
   if (!setter->IsUndefined()) {
@@ -2726,7 +2730,8 @@
   Object* bool_result = result->ToBoolean();
   if (mode == STRICT_DELETION && bool_result == GetHeap()->false_value()) {
     Handle<Object> handler(receiver->handler());
-    Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete");
+    Handle<String> trap_name = isolate->factory()->LookupOneByteSymbol(
+        STATIC_ASCII_VECTOR("delete"));
     Handle<Object> args[] = { handler, trap_name };
     Handle<Object> error = isolate->factory()->NewTypeError(
         "handler_failed", HandleVector(args, ARRAY_SIZE(args)));
@@ -2772,19 +2777,22 @@
   if (has_pending_exception) return NONE;
 
   // Convert result to PropertyAttributes.
-  Handle<String> enum_n = isolate->factory()->LookupAsciiSymbol("enumerable");
+  Handle<String> enum_n = isolate->factory()->LookupOneByteSymbol(
+      STATIC_ASCII_VECTOR("enumerable"));
   Handle<Object> enumerable(v8::internal::GetProperty(desc, enum_n));
   if (isolate->has_pending_exception()) return NONE;
-  Handle<String> conf_n = isolate->factory()->LookupAsciiSymbol("configurable");
+  Handle<String> conf_n = isolate->factory()->LookupOneByteSymbol(
+      STATIC_ASCII_VECTOR("configurable"));
   Handle<Object> configurable(v8::internal::GetProperty(desc, conf_n));
   if (isolate->has_pending_exception()) return NONE;
-  Handle<String> writ_n = isolate->factory()->LookupAsciiSymbol("writable");
+  Handle<String> writ_n = isolate->factory()->LookupOneByteSymbol(
+      STATIC_ASCII_VECTOR("writable"));
   Handle<Object> writable(v8::internal::GetProperty(desc, writ_n));
   if (isolate->has_pending_exception()) return NONE;
 
   if (configurable->IsFalse()) {
-    Handle<String> trap =
-        isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
+    Handle<String> trap = isolate->factory()->LookupOneByteSymbol(
+        STATIC_ASCII_VECTOR("getPropertyDescriptor"));
     Handle<Object> args[] = { handler, trap, name };
     Handle<Object> error = isolate->factory()->NewTypeError(
         "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
@@ -2844,7 +2852,7 @@
   Isolate* isolate = GetIsolate();
   Handle<Object> handler(this->handler());
 
-  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol(name);
+  Handle<String> trap_name = isolate->factory()->LookupUtf8Symbol(name);
   Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
   if (isolate->has_pending_exception()) return trap;
 
@@ -2912,6 +2920,9 @@
         lookup, name_raw, value_raw, attributes, strict_mode, store_mode);
   }
 
+  ASSERT(!lookup->IsFound() || lookup->holder() == this ||
+         lookup->holder()->map()->is_hidden_prototype());
+
   // From this point on everything needs to be handlified, because
   // SetPropertyViaPrototypes might call back into JavaScript.
   HandleScope scope(isolate);
@@ -2943,8 +2954,9 @@
   }
 
   Handle<Object> old_value(heap->the_hole_value(), isolate);
-  if (FLAG_harmony_observation && map()->is_observed()) {
-    old_value = handle(lookup->GetLazyValue(), isolate);
+  if (FLAG_harmony_observation &&
+      map()->is_observed() && lookup->IsDataProperty()) {
+    old_value = Object::GetProperty(self, name);
   }
 
   // This is a real property that is not read-only, or it is a
@@ -2952,10 +2964,10 @@
   MaybeObject* result = *value;
   switch (lookup->type()) {
     case NORMAL:
-      result = self->SetNormalizedProperty(lookup, *value);
+      result = lookup->holder()->SetNormalizedProperty(lookup, *value);
       break;
     case FIELD:
-      result = self->FastPropertyAtPut(
+      result = lookup->holder()->FastPropertyAtPut(
           lookup->GetFieldIndex().field_index(), *value);
       break;
     case CONSTANT_FUNCTION:
@@ -2963,21 +2975,17 @@
       if (*value == lookup->GetConstantFunction()) return *value;
       // Preserve the attributes of this existing property.
       attributes = lookup->GetAttributes();
-      result = self->ConvertDescriptorToField(*name, *value, attributes);
+      result =
+          lookup->holder()->ConvertDescriptorToField(*name, *value, attributes);
       break;
     case CALLBACKS: {
       Object* callback_object = lookup->GetCallbackObject();
-      return self->SetPropertyWithCallback(callback_object,
-                                           *name,
-                                           *value,
-                                           lookup->holder(),
-                                           strict_mode);
+      return self->SetPropertyWithCallback(
+          callback_object, *name, *value, lookup->holder(), strict_mode);
     }
     case INTERCEPTOR:
-      result = self->SetPropertyWithInterceptor(*name,
-                                                *value,
-                                                attributes,
-                                                strict_mode);
+      result = lookup->holder()->SetPropertyWithInterceptor(
+          *name, *value, attributes, strict_mode);
       break;
     case TRANSITION: {
       Map* transition_map = lookup->GetTransitionTarget();
@@ -2989,15 +2997,15 @@
       if (details.type() == FIELD) {
         if (attributes == details.attributes()) {
           int field_index = descriptors->GetFieldIndex(descriptor);
-          result = self->AddFastPropertyUsingMap(transition_map,
-                                                 *name,
-                                                 *value,
-                                                 field_index);
+          result = lookup->holder()->AddFastPropertyUsingMap(
+              transition_map, *name, *value, field_index);
         } else {
-          result = self->ConvertDescriptorToField(*name, *value, attributes);
+          result = lookup->holder()->ConvertDescriptorToField(
+              *name, *value, attributes);
         }
       } else if (details.type() == CALLBACKS) {
-        result = self->ConvertDescriptorToField(*name, *value, attributes);
+        result = lookup->holder()->ConvertDescriptorToField(
+            *name, *value, attributes);
       } else {
         ASSERT(details.type() == CONSTANT_FUNCTION);
 
@@ -3005,12 +3013,12 @@
         if (constant_function == *value) {
           // If the same constant function is being added we can simply
           // transition to the target map.
-          self->set_map(transition_map);
+          lookup->holder()->set_map(transition_map);
           result = constant_function;
         } else {
           // Otherwise, replace with a map transition to a new map with a FIELD,
           // even if the value is a constant function.
-          result = self->ConvertTransitionToMapTransition(
+          result = lookup->holder()->ConvertTransitionToMapTransition(
               lookup->GetTransitionIndex(), *name, *value, attributes);
         }
       }
@@ -3030,8 +3038,8 @@
     } else {
       LookupResult new_lookup(isolate);
       self->LocalLookup(*name, &new_lookup, true);
-      ASSERT(!new_lookup.GetLazyValue()->IsTheHole());
-      if (!new_lookup.GetLazyValue()->SameValue(*old_value)) {
+      if (new_lookup.IsDataProperty() &&
+          !Object::GetProperty(self, name)->SameValue(*old_value)) {
         EnqueueChangeRecord(self, "updated", name, old_value);
       }
     }
@@ -3110,15 +3118,7 @@
   PropertyAttributes old_attributes = ABSENT;
   bool is_observed = FLAG_harmony_observation && self->map()->is_observed();
   if (is_observed) {
-    // Function prototypes are stored specially
-    if (self->IsJSFunction() &&
-        JSFunction::cast(*self)->should_have_prototype() &&
-        name->Equals(isolate->heap()->prototype_symbol())) {
-      MaybeObject* maybe = Accessors::FunctionGetPrototype(*self, NULL);
-      if (!maybe->ToHandle(&old_value, isolate)) return maybe;
-    } else {
-      old_value = handle(lookup.GetLazyValue(), isolate);
-    }
+    if (lookup.IsDataProperty()) old_value = Object::GetProperty(self, name);
     old_attributes = lookup.GetAttributes();
   }
 
@@ -3188,11 +3188,11 @@
     } else {
       LookupResult new_lookup(isolate);
       self->LocalLookup(*name, &new_lookup, true);
-      ASSERT(!new_lookup.GetLazyValue()->IsTheHole());
       if (old_value->IsTheHole() ||
           new_lookup.GetAttributes() != old_attributes) {
         EnqueueChangeRecord(self, "reconfigured", name, old_value);
-      } else if (!new_lookup.GetLazyValue()->SameValue(*old_value)) {
+      } else if (new_lookup.IsDataProperty() &&
+          !Object::GetProperty(self, name)->SameValue(*old_value)) {
         EnqueueChangeRecord(self, "updated", name, old_value);
       }
     }
@@ -4255,8 +4255,8 @@
 
   Handle<Object> old_value(isolate->heap()->the_hole_value());
   bool is_observed = FLAG_harmony_observation && self->map()->is_observed();
-  if (is_observed) {
-    old_value = handle(lookup.GetLazyValue(), isolate);
+  if (is_observed && lookup.IsDataProperty()) {
+    old_value = Object::GetProperty(self, hname);
   }
   MaybeObject* result;
 
@@ -4947,7 +4947,9 @@
       LookupResult lookup(isolate);
       LocalLookup(*name, &lookup, true);
       preexists = lookup.IsProperty();
-      if (preexists) old_value = handle(lookup.GetLazyValue(), isolate);
+      if (preexists && lookup.IsDataProperty()) {
+        old_value = Object::GetProperty(self, name);
+      }
     }
   }
 
@@ -7026,71 +7028,79 @@
 }
 
 
-String* ConsStringIteratorOp::Operate(ConsString* consString,
-    unsigned* outerOffset, int32_t* typeOut, unsigned* lengthOut) {
-  ASSERT(*lengthOut == (unsigned)consString->length());
-  // Push the root string.
-  PushLeft(consString);
-  root_ = consString;
-  root_type_ = *typeOut;
-  root_length_ = *lengthOut;
-  unsigned targetOffset = *outerOffset;
+String* ConsStringIteratorOp::Operate(String* string,
+                                      unsigned* offset_out,
+                                      int32_t* type_out,
+                                      unsigned* length_out) {
+  ASSERT(string->IsConsString());
+  ConsString* cons_string = ConsString::cast(string);
+  // Set up search data.
+  root_ = cons_string;
+  consumed_ = *offset_out;
+  // Now search.
+  return Search(offset_out, type_out, length_out);
+}
+
+
+String* ConsStringIteratorOp::Search(unsigned* offset_out,
+                                     int32_t* type_out,
+                                     unsigned* length_out) {
+  ConsString* cons_string = root_;
+  // Reset the stack, pushing the root string.
+  depth_ = 1;
+  maximum_depth_ = 1;
+  frames_[0] = cons_string;
+  const unsigned consumed = consumed_;
   unsigned offset = 0;
   while (true) {
     // Loop until the string containing the target offset is found.
-    String* string = consString->first();
+    String* string = cons_string->first();
     unsigned length = string->length();
     int32_t type;
-    if (targetOffset < offset + length) {
+    if (consumed < offset + length) {
       // Target offset is in the left branch.
-      // Mark the descent.
-      ClearRightDescent();
       // Keep going if we're still in a ConsString.
       type = string->map()->instance_type();
       if ((type & kStringRepresentationMask) == kConsStringTag) {
-        consString = ConsString::cast(string);
-        PushLeft(consString);
+        cons_string = ConsString::cast(string);
+        PushLeft(cons_string);
         continue;
       }
+      // Tell the stack we're done descending.
+      AdjustMaximumDepth();
     } else {
       // Descend right.
       // Update progress through the string.
       offset += length;
       // Keep going if we're still in a ConsString.
-      string = consString->second();
+      string = cons_string->second();
       type = string->map()->instance_type();
       if ((type & kStringRepresentationMask) == kConsStringTag) {
-        consString = ConsString::cast(string);
-        PushRight(consString, type);
+        cons_string = ConsString::cast(string);
+        PushRight(cons_string);
+        // TODO(dcarney) Add back root optimization.
         continue;
       }
-      // Mark the descent.
-      SetRightDescent();
       // Need this to be updated for the current string.
       length = string->length();
       // Account for the possibility of an empty right leaf.
-      while (length == 0) {
-        bool blewStack;
-        // Need to adjust maximum depth for NextLeaf to work.
-        AdjustMaximumDepth();
-        string = NextLeaf(&blewStack, &type);
-        if (string == NULL) {
-          // Luckily, this case is impossible.
-          ASSERT(!blewStack);
-          return NULL;
-        }
-        length = string->length();
+      // This happens only if we have asked for an offset outside the string.
+      if (length == 0) {
+        // Reset depth so future operations will return null immediately.
+        Reset();
+        return NULL;
       }
+      // Tell the stack we're done descending.
+      AdjustMaximumDepth();
+      // Pop stack so next iteration is in correct place.
+      Pop();
     }
-    // Tell the stack we're done decending.
-    AdjustMaximumDepth();
     ASSERT(length != 0);
     // Adjust return values and exit.
-    unsigned innerOffset = targetOffset - offset;
-    consumed_ += length - innerOffset;
-    *outerOffset = innerOffset;
-    *typeOut = type;
-    *lengthOut = length;
+    consumed_ = offset + length;
+    *offset_out = consumed - offset;
+    *type_out = type;
+    *length_out = length;
     return string;
   }
   UNREACHABLE();
@@ -7098,52 +7108,54 @@
 }
 
 
-String* ConsStringIteratorOp::NextLeaf(bool* blewStack, int32_t* typeOut) {
+String* ConsStringIteratorOp::NextLeaf(bool* blew_stack,
+                                       int32_t* type_out,
+                                       unsigned* length_out) {
   while (true) {
     // Tree traversal complete.
     if (depth_ == 0) {
-      *blewStack = false;
+      *blew_stack = false;
       return NULL;
     }
     // We've lost track of higher nodes.
     if (maximum_depth_ - depth_ == kStackSize) {
-      *blewStack = true;
+      *blew_stack = true;
       return NULL;
     }
-    // Check if we're done with this level.
-    bool haveAlreadyReadRight = trace_ & MaskForDepth(depth_ - 1);
-    if (haveAlreadyReadRight) {
-      Pop();
-      continue;
-    }
     // Go right.
-    ConsString* consString = frames_[OffsetForDepth(depth_ - 1)];
-    String* string = consString->second();
+    ConsString* cons_string = frames_[OffsetForDepth(depth_ - 1)];
+    String* string = cons_string->second();
     int32_t type = string->map()->instance_type();
     if ((type & kStringRepresentationMask) != kConsStringTag) {
-      // Don't need to mark the descent here.
       // Pop stack so next iteration is in correct place.
       Pop();
-      *typeOut = type;
+      unsigned length = (unsigned) string->length();
+      // Could be a flattened ConsString.
+      if (length == 0) continue;
+      *length_out = length;
+      *type_out = type;
+      consumed_ += length;
       return string;
     }
-    // No need to mark the descent.
-    consString = ConsString::cast(string);
-    PushRight(consString, type);
+    cons_string = ConsString::cast(string);
+    // TODO(dcarney) Add back root optimization.
+    PushRight(cons_string);
     // Need to traverse all the way left.
     while (true) {
       // Continue left.
-      // Update marker.
-      ClearRightDescent();
-      string = consString->first();
+      string = cons_string->first();
       type = string->map()->instance_type();
       if ((type & kStringRepresentationMask) != kConsStringTag) {
         AdjustMaximumDepth();
-        *typeOut = type;
+        unsigned length = (unsigned) string->length();
+        ASSERT(length != 0);
+        *length_out = length;
+        *type_out = type;
+        consumed_ += length;
         return string;
       }
-      consString = ConsString::cast(string);
-      PushLeft(consString);
+      cons_string = ConsString::cast(string);
+      PushLeft(cons_string);
     }
   }
   UNREACHABLE();
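
Operate, Search and NextLeaf together iterate the leaves of a ConsString
tree left to right using a fixed-size ring of frames (kStackSize); when the
tree is deeper than the ring, NextLeaf sets blew_stack and ContinueOperation
restarts the search from root_ at the consumed_ offset. A minimal sketch of
the underlying traversal in plain JS, with an unbounded stack and assumed
{first, second, length} node shapes:

    function visitConsLeaves(root, visit) {
      var stack = [];
      var node = root;
      for (;;) {
        while (node.first !== undefined) {  // cons node: descend left spine
          stack.push(node);
          node = node.first;
        }
        if (node.length > 0) visit(node);   // skip empty (flattened) leaves
        if (stack.length === 0) return;     // traversal complete
        node = stack.pop().second;          // resume with the right child
      }
    }
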
@@ -7629,14 +7641,20 @@
 
 
 bool String::IsEqualTo(Vector<const char> str) {
-  Isolate* isolate = GetIsolate();
   int slen = length();
-  Access<UnicodeCache::Utf8Decoder>
-      decoder(isolate->unicode_cache()->utf8_decoder());
-  decoder->Reset(str.start(), str.length());
+  // Can't check exact length equality, but we can check bounds.
+  int str_len = str.length();
+  if (str_len < slen ||
+      str_len > slen*static_cast<int>(unibrow::Utf8::kMaxEncodedSize)) {
+    return false;
+  }
   int i;
-  for (i = 0; i < slen && decoder->has_more(); i++) {
-    uint32_t r = decoder->GetNext();
+  unsigned remaining_in_str = static_cast<unsigned>(str_len);
+  const uint8_t* utf8_data = reinterpret_cast<const uint8_t*>(str.start());
+  for (i = 0; i < slen && remaining_in_str > 0; i++) {
+    unsigned cursor = 0;
+    uint32_t r = unibrow::Utf8::ValueOf(utf8_data, remaining_in_str, &cursor);
+    ASSERT(cursor > 0 && cursor <= remaining_in_str);
     if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
       if (i > slen - 1) return false;
       if (Get(i++) != unibrow::Utf16::LeadSurrogate(r)) return false;
@@ -7644,8 +7662,10 @@
     } else {
       if (Get(i) != r) return false;
     }
+    utf8_data += cursor;
+    remaining_in_str -= cursor;
   }
-  return i == slen && !decoder->has_more();
+  return i == slen && remaining_in_str == 0;
 }
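
String::IsEqualTo now decodes the UTF-8 input inline with
unibrow::Utf8::ValueOf instead of going through the shared Utf8Decoder. The
new bounds check is deliberately loose because UTF-8 byte length and UTF-16
code-unit length differ per character; a sketch of the relationship,
assuming well-formed input:

    function utf8ByteLength(str) {  // str is a JS (UTF-16) string
      var bytes = 0;
      for (var i = 0; i < str.length; i++) {
        var c = str.charCodeAt(i);
        if (c < 0x80) bytes += 1;               // ASCII
        else if (c < 0x800) bytes += 2;
        else if (c >= 0xD800 && c <= 0xDBFF) {  // lead surrogate
          bytes += 4;                           // whole pair is 4 bytes
          i++;
        } else bytes += 3;
      }
      return bytes;
    }

Each UTF-16 code unit needs at least one and at most
unibrow::Utf8::kMaxEncodedSize bytes, which is exactly the early-out the
patch adds before decoding.
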
 
 
@@ -7678,28 +7698,62 @@
 }
 
 
+class IteratingStringHasher: public StringHasher {
+ public:
+  static inline uint32_t Hash(String* string, uint32_t seed) {
+    const unsigned len = static_cast<unsigned>(string->length());
+    IteratingStringHasher hasher(len, seed);
+    if (hasher.has_trivial_hash()) {
+      return hasher.GetHashField();
+    }
+    int32_t type = string->map()->instance_type();
+    ConsStringNullOp null_op;
+    String::Visit(string, 0, hasher, null_op, type, len);
+    // Flat strings terminate immediately.
+    if (hasher.consumed_ == len) {
+      ASSERT(!string->IsConsString());
+      return hasher.GetHashField();
+    }
+    ASSERT(string->IsConsString());
+    // This is a ConsString, iterate across it.
+    ConsStringIteratorOp op;
+    unsigned offset = 0;
+    unsigned leaf_length = len;
+    string = op.Operate(string, &offset, &type, &leaf_length);
+    while (true) {
+      ASSERT(hasher.consumed_ < len);
+      String::Visit(string, 0, hasher, null_op, type, leaf_length);
+      if (hasher.consumed_ == len) break;
+      string = op.ContinueOperation(&type, &leaf_length);
+      // This should be taken care of by the length check.
+      ASSERT(string != NULL);
+    }
+    return hasher.GetHashField();
+  }
+  inline void VisitOneByteString(const uint8_t* chars, unsigned length) {
+    AddCharacters(chars, static_cast<int>(length));
+    consumed_ += length;
+  }
+  inline void VisitTwoByteString(const uint16_t* chars, unsigned length) {
+    AddCharacters(chars, static_cast<int>(length));
+    consumed_ += length;
+  }
+
+ private:
+  inline IteratingStringHasher(int len, uint32_t seed)
+    : StringHasher(len, seed),
+      consumed_(0) {}
+  unsigned consumed_;
+  DISALLOW_COPY_AND_ASSIGN(IteratingStringHasher);
+};
+
+
 uint32_t String::ComputeAndSetHash() {
   // Should only be called if hash code has not yet been computed.
   ASSERT(!HasHashCode());
 
-  const int len = length();
-
-  // Compute the hash code.
-  uint32_t field = 0;
-  if (StringShape(this).IsSequentialAscii()) {
-    field = HashSequentialString(SeqOneByteString::cast(this)->GetChars(),
-                                 len,
-                                 GetHeap()->HashSeed());
-  } else if (StringShape(this).IsSequentialTwoByte()) {
-    field = HashSequentialString(SeqTwoByteString::cast(this)->GetChars(),
-                                 len,
-                                 GetHeap()->HashSeed());
-  } else {
-    StringInputBuffer buffer(this);
-    field = ComputeHashField(&buffer, len, GetHeap()->HashSeed());
-  }
-
   // Store the hash code in the object.
+  uint32_t field = IteratingStringHasher::Hash(this, GetHeap()->HashSeed());
   set_hash_field(field);
 
   // Check the hash code is there.
@@ -7803,57 +7857,64 @@
 }
 
 
-void StringHasher::AddSurrogatePair(uc32 c) {
-  uint16_t lead = unibrow::Utf16::LeadSurrogate(c);
-  AddCharacter(lead);
-  uint16_t trail = unibrow::Utf16::TrailSurrogate(c);
-  AddCharacter(trail);
-}
-
-
-void StringHasher::AddSurrogatePairNoIndex(uc32 c) {
-  uint16_t lead = unibrow::Utf16::LeadSurrogate(c);
-  AddCharacterNoIndex(lead);
-  uint16_t trail = unibrow::Utf16::TrailSurrogate(c);
-  AddCharacterNoIndex(trail);
-}
-
-
 uint32_t StringHasher::GetHashField() {
   if (length_ <= String::kMaxHashCalcLength) {
-    if (is_array_index()) {
-      return MakeArrayIndexHash(array_index(), length_);
+    if (is_array_index_) {
+      return MakeArrayIndexHash(array_index_, length_);
     }
-    return (GetHash() << String::kHashShift) | String::kIsNotArrayIndexMask;
+    return (GetHashCore(raw_running_hash_) << String::kHashShift) |
+           String::kIsNotArrayIndexMask;
   } else {
     return (length_ << String::kHashShift) | String::kIsNotArrayIndexMask;
   }
 }
 
 
-uint32_t String::ComputeHashField(unibrow::CharacterStream* buffer,
-                                  int length,
-                                  uint32_t seed) {
-  StringHasher hasher(length, seed);
-
-  // Very long strings have a trivial hash that doesn't inspect the
-  // string contents.
-  if (hasher.has_trivial_hash()) {
-    return hasher.GetHashField();
+uint32_t StringHasher::ComputeUtf8Hash(Vector<const char> chars,
+                                       uint32_t seed,
+                                       int* utf16_length_out) {
+  int vector_length = chars.length();
+  // Handle some edge cases.
+  if (vector_length <= 1) {
+    ASSERT(vector_length == 0 ||
+           static_cast<uint8_t>(chars.start()[0]) <=
+               unibrow::Utf8::kMaxOneByteChar);
+    *utf16_length_out = vector_length;
+    return HashSequentialString(chars.start(), vector_length, seed);
   }
-
-  // Do the iterative array index computation as long as there is a
-  // chance this is an array index.
-  while (buffer->has_more() && hasher.is_array_index()) {
-    hasher.AddCharacter(buffer->GetNext());
+  // Start with a fake length which won't affect computation.
+  // It will be updated later.
+  StringHasher hasher(String::kMaxArrayIndexSize, seed);
+  unsigned remaining = static_cast<unsigned>(vector_length);
+  const uint8_t* stream = reinterpret_cast<const uint8_t*>(chars.start());
+  int utf16_length = 0;
+  bool is_index = true;
+  ASSERT(hasher.is_array_index_);
+  while (remaining > 0) {
+    unsigned consumed = 0;
+    uint32_t c = unibrow::Utf8::ValueOf(stream, remaining, &consumed);
+    ASSERT(consumed > 0 && consumed <= remaining);
+    stream += consumed;
+    remaining -= consumed;
+    bool is_two_characters = c > unibrow::Utf16::kMaxNonSurrogateCharCode;
+    utf16_length += is_two_characters ? 2 : 1;
+    // No need to keep hashing. But we do need to calculate utf16_length.
+    if (utf16_length > String::kMaxHashCalcLength) continue;
+    if (is_two_characters) {
+      uint16_t c1 = unibrow::Utf16::LeadSurrogate(c);
+      uint16_t c2 = unibrow::Utf16::TrailSurrogate(c);
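+      // e.g. c = 0x1F600 yields c1 = 0xD83D and c2 = 0xDE00.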
+      hasher.AddCharacter(c1);
+      hasher.AddCharacter(c2);
+      if (is_index) is_index = hasher.UpdateIndex(c1);
+      if (is_index) is_index = hasher.UpdateIndex(c2);
+    } else {
+      hasher.AddCharacter(c);
+      if (is_index) is_index = hasher.UpdateIndex(c);
+    }
   }
-
-  // Process the remaining characters without updating the array
-  // index.
-  while (buffer->has_more()) {
-    hasher.AddCharacterNoIndex(buffer->GetNext());
-  }
-
+  *utf16_length_out = static_cast<int>(utf16_length);
+  // Must set length here so that hash computation is correct.
+  hasher.length_ = utf16_length;
   return hasher.GetHashField();
 }
 
@@ -8338,7 +8399,7 @@
                                  byte kind) {
   String* symbol;
   { MaybeObject* maybe_symbol =
-        Isolate::Current()->heap()->LookupAsciiSymbol(to_string);
+        Isolate::Current()->heap()->LookupUtf8Symbol(CStrVector(to_string));
     if (!maybe_symbol->To(&symbol)) return maybe_symbol;
   }
   set_to_string(symbol);
@@ -9077,6 +9138,49 @@
 }
 
 
+void Code::PrintDeoptLocation(int bailout_id) {
+  const char* last_comment = NULL;
+  int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
+      | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
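+  // Eager-deopt RUNTIME_ENTRY reloc entries are interleaved with COMMENT
+  // entries naming bailout reasons; the last comment seen before the entry
+  // whose deoptimization id matches bailout_id names the deopt location.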
+  for (RelocIterator it(this, mask); !it.done(); it.next()) {
+    RelocInfo* info = it.rinfo();
+    if (info->rmode() == RelocInfo::COMMENT) {
+      last_comment = reinterpret_cast<const char*>(info->data());
+    } else if (last_comment != NULL &&
+               bailout_id == Deoptimizer::GetDeoptimizationId(
+                   info->target_address(), Deoptimizer::EAGER)) {
+      CHECK(info->rmode() == RelocInfo::RUNTIME_ENTRY);
+      PrintF("            %s\n", last_comment);
+      return;
+    }
+  }
+}
+
+
+// Identify kind of code.
+const char* Code::Kind2String(Kind kind) {
+  switch (kind) {
+    case FUNCTION: return "FUNCTION";
+    case OPTIMIZED_FUNCTION: return "OPTIMIZED_FUNCTION";
+    case COMPILED_STUB: return "COMPILED_STUB";
+    case STUB: return "STUB";
+    case BUILTIN: return "BUILTIN";
+    case LOAD_IC: return "LOAD_IC";
+    case KEYED_LOAD_IC: return "KEYED_LOAD_IC";
+    case STORE_IC: return "STORE_IC";
+    case KEYED_STORE_IC: return "KEYED_STORE_IC";
+    case CALL_IC: return "CALL_IC";
+    case KEYED_CALL_IC: return "KEYED_CALL_IC";
+    case UNARY_OP_IC: return "UNARY_OP_IC";
+    case BINARY_OP_IC: return "BINARY_OP_IC";
+    case COMPARE_IC: return "COMPARE_IC";
+    case TO_BOOLEAN_IC: return "TO_BOOLEAN_IC";
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
 #ifdef ENABLE_DISASSEMBLER
 
 void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
@@ -9136,6 +9240,12 @@
           break;
         }
 
+        case Translation::COMPILED_STUB_FRAME: {
+          Code::Kind stub_kind = static_cast<Code::Kind>(iterator.Next());
+          PrintF(out, "{kind=%d}", stub_kind);
+          break;
+        }
+
         case Translation::ARGUMENTS_ADAPTOR_FRAME:
         case Translation::CONSTRUCT_STUB_FRAME: {
           int function_id = iterator.Next();
@@ -9219,8 +9329,12 @@
           break;
         }
 
-        case Translation::ARGUMENTS_OBJECT:
+        case Translation::ARGUMENTS_OBJECT: {
+          int args_index = iterator.Next();
+          int args_length = iterator.Next();
+          PrintF(out, "{index=%d, length=%d}", args_index, args_length);
           break;
+        }
       }
       PrintF(out, "\n");
     }
@@ -9245,29 +9359,6 @@
 }
 
 
-// Identify kind of code.
-const char* Code::Kind2String(Kind kind) {
-  switch (kind) {
-    case FUNCTION: return "FUNCTION";
-    case OPTIMIZED_FUNCTION: return "OPTIMIZED_FUNCTION";
-    case STUB: return "STUB";
-    case BUILTIN: return "BUILTIN";
-    case LOAD_IC: return "LOAD_IC";
-    case KEYED_LOAD_IC: return "KEYED_LOAD_IC";
-    case STORE_IC: return "STORE_IC";
-    case KEYED_STORE_IC: return "KEYED_STORE_IC";
-    case CALL_IC: return "CALL_IC";
-    case KEYED_CALL_IC: return "KEYED_CALL_IC";
-    case UNARY_OP_IC: return "UNARY_OP_IC";
-    case BINARY_OP_IC: return "BINARY_OP_IC";
-    case COMPARE_IC: return "COMPARE_IC";
-    case TO_BOOLEAN_IC: return "TO_BOOLEAN_IC";
-  }
-  UNREACHABLE();
-  return NULL;
-}
-
-
 const char* Code::ICState2String(InlineCacheState state) {
   switch (state) {
     case UNINITIALIZED: return "UNINITIALIZED";
@@ -9369,7 +9460,7 @@
   }
   PrintF("\n");
 
-  if (kind() == OPTIMIZED_FUNCTION) {
+  if (kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB) {
     SafepointTable table(this);
     PrintF(out, "Safepoints (size = %u)\n", table.size());
     for (unsigned i = 0; i < table.length(); i++) {
@@ -9427,6 +9518,7 @@
   Heap* heap = GetHeap();
   // We should never end in here with a pixel or external array.
   ASSERT(!HasExternalArrayElements());
+  ASSERT(!map()->is_observed());
 
   // Allocate a new fast elements backing store.
   FixedArray* new_elements;
@@ -9491,6 +9583,7 @@
   Heap* heap = GetHeap();
   // We should never end in here with a pixel or external array.
   ASSERT(!HasExternalArrayElements());
+  ASSERT(!map()->is_observed());
 
   FixedArrayBase* elems;
   { MaybeObject* maybe_obj =
@@ -10634,6 +10727,7 @@
 
 
 MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
+  ASSERT(!map()->is_observed());
   ElementsKind from_kind = map()->elements_kind();
 
   if (IsFastHoleyElementsKind(from_kind)) {
@@ -10890,6 +10984,9 @@
   // An object requiring access checks is never allowed to have fast
   // elements.  If it had fast elements we would skip security checks.
   if (IsAccessCheckNeeded()) return false;
+  // Observed objects may not go to fast mode because they rely on map checks,
+  // and for fast element accesses we sometimes check element kinds only.
+  if (FLAG_harmony_observation && map()->is_observed()) return false;
 
   FixedArray* elements = FixedArray::cast(this->elements());
   SeededNumberDictionary* dictionary = NULL;
@@ -11651,10 +11748,7 @@
 
   uint32_t Hash() {
     if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
-    unibrow::Utf8InputBuffer<> buffer(string_.start(),
-                                      static_cast<unsigned>(string_.length()));
-    chars_ = buffer.Utf16Length();
-    hash_field_ = String::ComputeHashField(&buffer, chars_, seed_);
+    hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_);
     uint32_t result = hash_field_ >> String::kHashShift;
     ASSERT(result != 0);  // Ensure that the hash value of 0 is never computed.
     return result;
@@ -11684,29 +11778,9 @@
       : string_(string), hash_field_(0), seed_(seed) { }
 
   uint32_t Hash() {
-    StringHasher hasher(string_.length(), seed_);
-
-    // Very long strings have a trivial hash that doesn't inspect the
-    // string contents.
-    if (hasher.has_trivial_hash()) {
-      hash_field_ = hasher.GetHashField();
-    } else {
-      int i = 0;
-      // Do the iterative array index computation as long as there is a
-      // chance this is an array index.
-      while (i < string_.length() && hasher.is_array_index()) {
-        hasher.AddCharacter(static_cast<uc32>(string_[i]));
-        i++;
-      }
-
-      // Process the remaining characters without updating the array
-      // index.
-      while (i < string_.length()) {
-        hasher.AddCharacterNoIndex(static_cast<uc32>(string_[i]));
-        i++;
-      }
-      hash_field_ = hasher.GetHashField();
-    }
+    hash_field_ = StringHasher::HashSequentialString<Char>(string_.start(),
+                                                           string_.length(),
+                                                           seed_);
 
     uint32_t result = hash_field_ >> String::kHashShift;
     ASSERT(result != 0);  // Ensure that the hash value of 0 is never computed.
@@ -11751,32 +11825,9 @@
   uint32_t Hash() {
     ASSERT(length_ >= 0);
     ASSERT(from_ + length_ <= string_->length());
-    StringHasher hasher(length_, string_->GetHeap()->HashSeed());
-
-    // Very long strings have a trivial hash that doesn't inspect the
-    // string contents.
-    if (hasher.has_trivial_hash()) {
-      hash_field_ = hasher.GetHashField();
-    } else {
-      int i = 0;
-      // Do the iterative array index computation as long as there is a
-      // chance this is an array index.
-      while (i < length_ && hasher.is_array_index()) {
-        hasher.AddCharacter(static_cast<uc32>(
-            string_->SeqOneByteStringGet(i + from_)));
-        i++;
-      }
-
-      // Process the remaining characters without updating the array
-      // index.
-      while (i < length_) {
-        hasher.AddCharacterNoIndex(static_cast<uc32>(
-            string_->SeqOneByteStringGet(i + from_)));
-        i++;
-      }
-      hash_field_ = hasher.GetHashField();
-    }
-
+    char* chars = string_->GetChars() + from_;
+    hash_field_ = StringHasher::HashSequentialString(
+        chars, length_, string_->GetHeap()->HashSeed());
     uint32_t result = hash_field_ >> String::kHashShift;
     ASSERT(result != 0);  // Ensure that the hash value of 0 is never computed.
     return result;
@@ -11851,8 +11902,7 @@
       return string_;
     }
     // Otherwise allocate a new symbol.
-    StringInputBuffer buffer(string_);
-    return heap->AllocateInternalSymbol(&buffer,
+    return heap->AllocateInternalSymbol(string_,
                                         string_->length(),
                                         string_->hash_field());
   }
@@ -12622,7 +12672,7 @@
 // algorithm.
 class TwoCharHashTableKey : public HashTableKey {
  public:
-  TwoCharHashTableKey(uint32_t c1, uint32_t c2, uint32_t seed)
+  TwoCharHashTableKey(uint16_t c1, uint16_t c2, uint32_t seed)
     : c1_(c1), c2_(c2) {
     // Char 1.
     uint32_t hash = seed;
@@ -12638,17 +12688,17 @@
     hash ^= hash >> 11;
     hash += hash << 15;
     if ((hash & String::kHashBitMask) == 0) hash = StringHasher::kZeroHash;
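+    // A hash of zero is reserved to mean "hash not yet computed".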
+    hash_ = hash;
 #ifdef DEBUG
-    StringHasher hasher(2, seed);
-    hasher.AddCharacter(c1);
-    hasher.AddCharacter(c2);
     // If this assert fails then we failed to reproduce the two-character
     // version of the string hashing algorithm above.  One reason could be
     // that we were passed two digits as characters, since the hash
     // algorithm is different in that case.
-    ASSERT_EQ(static_cast<int>(hasher.GetHash()), static_cast<int>(hash));
+    uint16_t chars[2] = {c1, c2};
+    uint32_t check_hash = StringHasher::HashSequentialString(chars, 2, seed);
+    hash = (hash << String::kHashShift) | String::kIsNotArrayIndexMask;
+    ASSERT_EQ(static_cast<int32_t>(hash), static_cast<int32_t>(check_hash));
 #endif
-    hash_ = hash;
   }
 
   bool IsMatch(Object* o) {
@@ -12673,8 +12723,8 @@
   }
 
  private:
-  uint32_t c1_;
-  uint32_t c2_;
+  uint16_t c1_;
+  uint16_t c2_;
   uint32_t hash_;
 };
 
@@ -12693,8 +12743,8 @@
 }
 
 
-bool SymbolTable::LookupTwoCharsSymbolIfExists(uint32_t c1,
-                                               uint32_t c2,
+bool SymbolTable::LookupTwoCharsSymbolIfExists(uint16_t c1,
+                                               uint16_t c2,
                                                String** symbol) {
   TwoCharHashTableKey key(c1, c2, GetHeap()->HashSeed());
   int entry = FindEntry(&key);
@@ -12709,21 +12759,21 @@
 }
 
 
-MaybeObject* SymbolTable::LookupSymbol(Vector<const char> str,
-                                       Object** s) {
+MaybeObject* SymbolTable::LookupUtf8Symbol(Vector<const char> str,
+                                           Object** s) {
   Utf8SymbolKey key(str, GetHeap()->HashSeed());
   return LookupKey(&key, s);
 }
 
 
-MaybeObject* SymbolTable::LookupAsciiSymbol(Vector<const char> str,
-                                            Object** s) {
+MaybeObject* SymbolTable::LookupOneByteSymbol(Vector<const char> str,
+                                              Object** s) {
   AsciiSymbolKey key(str, GetHeap()->HashSeed());
   return LookupKey(&key, s);
 }
 
 
-MaybeObject* SymbolTable::LookupSubStringAsciiSymbol(
+MaybeObject* SymbolTable::LookupSubStringOneByteSymbol(
     Handle<SeqOneByteString> str,
     int from,
     int length,
diff --git a/src/objects.h b/src/objects.h
index 6c9b31b..88194c4 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -756,6 +756,12 @@
 #define DECLARE_VERIFIER(Name)
 #endif
 
+#ifdef OBJECT_PRINT
+#define DECLARE_PRINTER(Name) void Name##Print(FILE* out = stdout);
+#else
+#define DECLARE_PRINTER(Name)
+#endif
+
 class MaybeObject BASE_EMBEDDED {
  public:
   inline bool IsFailure();
@@ -2109,12 +2115,7 @@
 
   // Dispatched behavior.
   void JSObjectShortPrint(StringStream* accumulator);
-#ifdef OBJECT_PRINT
-  inline void JSObjectPrint() {
-    JSObjectPrint(stdout);
-  }
-  void JSObjectPrint(FILE* out);
-#endif
+  DECLARE_PRINTER(JSObject)
   DECLARE_VERIFIER(JSObject)
 #ifdef OBJECT_PRINT
   inline void PrintProperties() {
@@ -2408,12 +2409,7 @@
   static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
 
   // Dispatched behavior.
-#ifdef OBJECT_PRINT
-  inline void FixedArrayPrint() {
-    FixedArrayPrint(stdout);
-  }
-  void FixedArrayPrint(FILE* out);
-#endif
+  DECLARE_PRINTER(FixedArray)
   DECLARE_VERIFIER(FixedArray)
 #ifdef DEBUG
   // Checks if two FixedArrays have identical contents.
@@ -2500,12 +2496,7 @@
   static const int kMaxLength = (kMaxSize - kHeaderSize) / kDoubleSize;
 
   // Dispatched behavior.
-#ifdef OBJECT_PRINT
-  inline void FixedDoubleArrayPrint() {
-    FixedDoubleArrayPrint(stdout);
-  }
-  void FixedDoubleArrayPrint(FILE* out);
-#endif
+  DECLARE_PRINTER(FixedDoubleArray)
   DECLARE_VERIFIER(FixedDoubleArray)
 
  private:
@@ -3035,10 +3026,11 @@
   // added.  The return value is the symbol table which might have
   // been enlarged.  If the return value is not a failure, the symbol
   // pointer *s is set to the symbol found.
-  MUST_USE_RESULT MaybeObject* LookupSymbol(Vector<const char> str, Object** s);
-  MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str,
-                                                 Object** s);
-  MUST_USE_RESULT MaybeObject* LookupSubStringAsciiSymbol(
+  MUST_USE_RESULT MaybeObject* LookupUtf8Symbol(Vector<const char> str,
+                                                Object** s);
+  MUST_USE_RESULT MaybeObject* LookupOneByteSymbol(Vector<const char> str,
+                                                   Object** s);
+  MUST_USE_RESULT MaybeObject* LookupSubStringOneByteSymbol(
       Handle<SeqOneByteString> str,
       int from,
       int length,
@@ -3051,7 +3043,7 @@
   // true if it is found, assigning the symbol to the given output
   // parameter.
   bool LookupSymbolIfExists(String* str, String** symbol);
-  bool LookupTwoCharsSymbolIfExists(uint32_t c1, uint32_t c2, String** symbol);
+  bool LookupTwoCharsSymbolIfExists(uint16_t c1, uint16_t c2, String** symbol);
 
   // Casting.
   static inline SymbolTable* cast(Object* obj);
@@ -3705,12 +3697,7 @@
   inline int ByteArraySize() {
     return SizeFor(this->length());
   }
-#ifdef OBJECT_PRINT
-  inline void ByteArrayPrint() {
-    ByteArrayPrint(stdout);
-  }
-  void ByteArrayPrint(FILE* out);
-#endif
+  DECLARE_PRINTER(ByteArray)
   DECLARE_VERIFIER(ByteArray)
 
   // Layout description.
@@ -3739,12 +3726,8 @@
   // Casting.
   static inline FreeSpace* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void FreeSpacePrint() {
-    FreeSpacePrint(stdout);
-  }
-  void FreeSpacePrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(FreeSpace)
   DECLARE_VERIFIER(FreeSpace)
 
   // Layout description.
@@ -3819,12 +3802,8 @@
   // Casting.
   static inline ExternalPixelArray* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void ExternalPixelArrayPrint() {
-    ExternalPixelArrayPrint(stdout);
-  }
-  void ExternalPixelArrayPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(ExternalPixelArray)
   DECLARE_VERIFIER(ExternalPixelArray)
 
  private:
@@ -3846,12 +3825,8 @@
   // Casting.
   static inline ExternalByteArray* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void ExternalByteArrayPrint() {
-    ExternalByteArrayPrint(stdout);
-  }
-  void ExternalByteArrayPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(ExternalByteArray)
   DECLARE_VERIFIER(ExternalByteArray)
 
  private:
@@ -3873,12 +3848,8 @@
   // Casting.
   static inline ExternalUnsignedByteArray* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void ExternalUnsignedByteArrayPrint() {
-    ExternalUnsignedByteArrayPrint(stdout);
-  }
-  void ExternalUnsignedByteArrayPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(ExternalUnsignedByteArray)
   DECLARE_VERIFIER(ExternalUnsignedByteArray)
 
  private:
@@ -3900,12 +3871,8 @@
   // Casting.
   static inline ExternalShortArray* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void ExternalShortArrayPrint() {
-    ExternalShortArrayPrint(stdout);
-  }
-  void ExternalShortArrayPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(ExternalShortArray)
   DECLARE_VERIFIER(ExternalShortArray)
 
  private:
@@ -3927,12 +3894,8 @@
   // Casting.
   static inline ExternalUnsignedShortArray* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void ExternalUnsignedShortArrayPrint() {
-    ExternalUnsignedShortArrayPrint(stdout);
-  }
-  void ExternalUnsignedShortArrayPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(ExternalUnsignedShortArray)
   DECLARE_VERIFIER(ExternalUnsignedShortArray)
 
  private:
@@ -3954,12 +3917,8 @@
   // Casting.
   static inline ExternalIntArray* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void ExternalIntArrayPrint() {
-    ExternalIntArrayPrint(stdout);
-  }
-  void ExternalIntArrayPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(ExternalIntArray)
   DECLARE_VERIFIER(ExternalIntArray)
 
  private:
@@ -3981,12 +3940,8 @@
   // Casting.
   static inline ExternalUnsignedIntArray* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void ExternalUnsignedIntArrayPrint() {
-    ExternalUnsignedIntArrayPrint(stdout);
-  }
-  void ExternalUnsignedIntArrayPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(ExternalUnsignedIntArray)
   DECLARE_VERIFIER(ExternalUnsignedIntArray)
 
  private:
@@ -4008,12 +3963,8 @@
   // Casting.
   static inline ExternalFloatArray* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void ExternalFloatArrayPrint() {
-    ExternalFloatArrayPrint(stdout);
-  }
-  void ExternalFloatArrayPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(ExternalFloatArray)
   DECLARE_VERIFIER(ExternalFloatArray)
 
  private:
@@ -4035,12 +3986,8 @@
   // Casting.
   static inline ExternalDoubleArray* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void ExternalDoubleArrayPrint() {
-    ExternalDoubleArrayPrint(stdout);
-  }
-  void ExternalDoubleArrayPrint(FILE* out);
-#endif  // OBJECT_PRINT
+  // Dispatched behavior.
+  DECLARE_PRINTER(ExternalDoubleArray)
   DECLARE_VERIFIER(ExternalDoubleArray)
 
  private:
@@ -4235,6 +4182,7 @@
   V(FUNCTION)             \
   V(OPTIMIZED_FUNCTION)   \
   V(STUB)                 \
+  V(COMPILED_STUB)        \
   V(BUILTIN)              \
   V(LOAD_IC)              \
   V(KEYED_LOAD_IC)        \
@@ -4263,6 +4211,8 @@
   // Flags.
   STATIC_ASSERT(LAST_CODE_KIND < 16);
 
+  static const char* Kind2String(Kind kind);
+
   // Types of stubs.
   enum StubType {
     NORMAL,
@@ -4284,7 +4234,6 @@
 
 #ifdef ENABLE_DISASSEMBLER
   // Printing
-  static const char* Kind2String(Kind kind);
   static const char* ICState2String(InlineCacheState state);
   static const char* StubType2String(StubType type);
   static void PrintExtraICState(FILE* out, Kind kind, ExtraICState extra);
@@ -4308,13 +4257,18 @@
   // [deoptimization_data]: Array containing data for deopt.
   DECL_ACCESSORS(deoptimization_data, FixedArray)
 
-  // [type_feedback_info]: Struct containing type feedback information.
+  // [type_feedback_info]: Struct containing type feedback information for
+  // unoptimized code. Optimized code can temporarily store the head of
+  // the list of dependent optimized functions during deoptimization.
   // STUBs can use this slot to store arbitrary information as a Smi.
-  // Will contain either a TypeFeedbackInfo object, or undefined, or a Smi.
+  // Will contain either a TypeFeedbackInfo object, a JSFunction object,
+  // undefined, or a Smi.
   DECL_ACCESSORS(type_feedback_info, Object)
   inline void InitializeTypeFeedbackInfoNoWriteBarrier(Object* value);
   inline int stub_info();
   inline void set_stub_info(int info);
+  inline Object* deoptimizing_functions();
+  inline void set_deoptimizing_functions(Object* value);
 
   // [gc_metadata]: Field used to hold GC-related metadata. The contents of
   // this field do not have to be traced during garbage collection since
@@ -4552,12 +4506,8 @@
 
   template<typename StaticVisitor>
   inline void CodeIterateBody(Heap* heap);
-#ifdef OBJECT_PRINT
-  inline void CodePrint() {
-    CodePrint(stdout);
-  }
-  void CodePrint(FILE* out);
-#endif
+
+  DECLARE_PRINTER(Code)
   DECLARE_VERIFIER(Code)
 
   void ClearInlineCaches();
@@ -4579,6 +4529,8 @@
   static bool IsYoungSequence(byte* sequence);
   bool IsOld();
 
+  void PrintDeoptLocation(int bailout_id);
+
   // Max loop nesting marker used to postpone OSR. We don't take loop
   // nesting that is deeper than 5 levels into account.
   static const int kMaxLoopNestingMarker = 6;
@@ -4822,6 +4774,10 @@
   inline void set_elements_kind(ElementsKind elements_kind) {
     ASSERT(elements_kind < kElementsKindCount);
     ASSERT(kElementsKindCount <= (1 << kElementsKindBitCount));
+    ASSERT(!is_observed() ||
+           elements_kind == DICTIONARY_ELEMENTS ||
+           elements_kind == NON_STRICT_ARGUMENTS_ELEMENTS ||
+           IsExternalArrayElementsKind(elements_kind));
     set_bit_field2((bit_field2() & ~kElementsKindMask) |
         (elements_kind << kElementsKindShift));
     ASSERT(this->elements_kind() == elements_kind);
@@ -4850,6 +4806,10 @@
     return IsFastDoubleElementsKind(elements_kind());
   }
 
+  inline bool has_fast_elements() {
+    return IsFastElementsKind(elements_kind());
+  }
+
   inline bool has_non_strict_arguments_elements() {
     return elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
   }
@@ -5133,12 +5093,7 @@
   void ZapTransitions();
 
   // Dispatched behavior.
-#ifdef OBJECT_PRINT
-  inline void MapPrint() {
-    MapPrint(stdout);
-  }
-  void MapPrint(FILE* out);
-#endif
+  DECLARE_PRINTER(Map)
   DECLARE_VERIFIER(Map)
 
 #ifdef VERIFY_HEAP
@@ -5184,8 +5139,7 @@
       kConstructorOffset + kPointerSize;
   static const int kDescriptorsOffset =
       kTransitionsOrBackPointerOffset + kPointerSize;
-  static const int kCodeCacheOffset =
-      kDescriptorsOffset + kPointerSize;
+  static const int kCodeCacheOffset = kDescriptorsOffset + kPointerSize;
   static const int kBitField3Offset = kCodeCacheOffset + kPointerSize;
   static const int kSize = kBitField3Offset + kPointerSize;
 
@@ -5339,12 +5293,8 @@
   // resource is accessible. Otherwise, always return true.
   inline bool HasValidSource();
 
-#ifdef OBJECT_PRINT
-  inline void ScriptPrint() {
-    ScriptPrint(stdout);
-  }
-  void ScriptPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(Script)
   DECLARE_VERIFIER(Script)
 
   static const int kSourceOffset = HeapObject::kHeaderSize;
@@ -5819,12 +5769,7 @@
   // Dispatched behavior.
   // Set max_length to -1 for unlimited length.
   void SourceCodePrint(StringStream* accumulator, int max_length);
-#ifdef OBJECT_PRINT
-  inline void SharedFunctionInfoPrint() {
-    SharedFunctionInfoPrint(stdout);
-  }
-  void SharedFunctionInfoPrint(FILE* out);
-#endif
+  DECLARE_PRINTER(SharedFunctionInfo)
   DECLARE_VERIFIER(SharedFunctionInfo)
 
   void ResetForNewContext(int new_ic_age);
@@ -6053,12 +5998,7 @@
   static inline JSModule* cast(Object* obj);
 
   // Dispatched behavior.
-#ifdef OBJECT_PRINT
-  inline void JSModulePrint() {
-    JSModulePrint(stdout);
-  }
-  void JSModulePrint(FILE* out);
-#endif
+  DECLARE_PRINTER(JSModule)
   DECLARE_VERIFIER(JSModule)
 
   // Layout description.
@@ -6212,12 +6152,7 @@
   void JSFunctionIterateBody(int object_size, ObjectVisitor* v);
 
   // Dispatched behavior.
-#ifdef OBJECT_PRINT
-  inline void JSFunctionPrint() {
-    JSFunctionPrint(stdout);
-  }
-  void JSFunctionPrint(FILE* out);
-#endif
+  DECLARE_PRINTER(JSFunction)
   DECLARE_VERIFIER(JSFunction)
 
   // Returns the number of allocated literals.
@@ -6226,6 +6161,18 @@
   // Retrieve the native context from a function's literal array.
   static Context* NativeContextFromLiterals(FixedArray* literals);
 
+#ifdef DEBUG
+  bool FunctionsInFunctionListShareSameCode() {
+    Object* current = this;
+    while (!current->IsUndefined()) {
+      JSFunction* function = JSFunction::cast(current);
+      current = function->next_function_link();
+      if (function->code() != this->code()) return false;
+    }
+    return true;
+  }
+#endif
+
   // Layout descriptors. The last property (from kNonWeakFieldsEndOffset to
   // kSize) is weak and has special handling during garbage collection.
   static const int kCodeEntryOffset = JSObject::kHeaderSize;
@@ -6271,12 +6218,7 @@
   static inline JSGlobalProxy* cast(Object* obj);
 
   // Dispatched behavior.
-#ifdef OBJECT_PRINT
-  inline void JSGlobalProxyPrint() {
-    JSGlobalProxyPrint(stdout);
-  }
-  void JSGlobalProxyPrint(FILE* out);
-#endif
+  DECLARE_PRINTER(JSGlobalProxy)
   DECLARE_VERIFIER(JSGlobalProxy)
 
   // Layout description.
@@ -6349,12 +6291,7 @@
   static inline JSGlobalObject* cast(Object* obj);
 
   // Dispatched behavior.
-#ifdef OBJECT_PRINT
-  inline void JSGlobalObjectPrint() {
-    JSGlobalObjectPrint(stdout);
-  }
-  void JSGlobalObjectPrint(FILE* out);
-#endif
+  DECLARE_PRINTER(JSGlobalObject)
   DECLARE_VERIFIER(JSGlobalObject)
 
   // Layout description.
@@ -6381,12 +6318,7 @@
   static inline JSBuiltinsObject* cast(Object* obj);
 
   // Dispatched behavior.
-#ifdef OBJECT_PRINT
-  inline void JSBuiltinsObjectPrint() {
-    JSBuiltinsObjectPrint(stdout);
-  }
-  void JSBuiltinsObjectPrint(FILE* out);
-#endif
+  DECLARE_PRINTER(JSBuiltinsObject)
   DECLARE_VERIFIER(JSBuiltinsObject)
 
   // Layout description.  The size of the builtins object includes
@@ -6422,12 +6354,7 @@
   static inline JSValue* cast(Object* obj);
 
   // Dispatched behavior.
-#ifdef OBJECT_PRINT
-  inline void JSValuePrint() {
-    JSValuePrint(stdout);
-  }
-  void JSValuePrint(FILE* out);
-#endif
+  DECLARE_PRINTER(JSValue)
   DECLARE_VERIFIER(JSValue)
 
   // Layout description.
@@ -6476,12 +6403,7 @@
 
 
   // Dispatched behavior.
-#ifdef OBJECT_PRINT
-  inline void JSDatePrint() {
-    JSDatePrint(stdout);
-  }
-  void JSDatePrint(FILE* out);
-#endif
+  DECLARE_PRINTER(JSDate)
   DECLARE_VERIFIER(JSDate)
 
   // The order is important. It must be kept in sync with date macros
@@ -6573,12 +6495,7 @@
   static inline JSMessageObject* cast(Object* obj);
 
   // Dispatched behavior.
-#ifdef OBJECT_PRINT
-  inline void JSMessageObjectPrint() {
-    JSMessageObjectPrint(stdout);
-  }
-  void JSMessageObjectPrint(FILE* out);
-#endif
+  DECLARE_PRINTER(JSMessageObject)
   DECLARE_VERIFIER(JSMessageObject)
 
   // Layout description.
@@ -6819,12 +6736,8 @@
 
   static inline CodeCache* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void CodeCachePrint() {
-    CodeCachePrint(stdout);
-  }
-  void CodeCachePrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(CodeCache)
   DECLARE_VERIFIER(CodeCache)
 
   static const int kDefaultCacheOffset = HeapObject::kHeaderSize;
@@ -6908,12 +6821,8 @@
 
   static inline PolymorphicCodeCache* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void PolymorphicCodeCachePrint() {
-    PolymorphicCodeCachePrint(stdout);
-  }
-  void PolymorphicCodeCachePrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(PolymorphicCodeCache)
   DECLARE_VERIFIER(PolymorphicCodeCache)
 
   static const int kCacheOffset = HeapObject::kHeaderSize;
@@ -6961,12 +6870,8 @@
 
   static inline TypeFeedbackInfo* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void TypeFeedbackInfoPrint() {
-    TypeFeedbackInfoPrint(stdout);
-  }
-  void TypeFeedbackInfoPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(TypeFeedbackInfo)
   DECLARE_VERIFIER(TypeFeedbackInfo)
 
   static const int kStorage1Offset = HeapObject::kHeaderSize;
@@ -7007,12 +6912,8 @@
 
   static inline AliasedArgumentsEntry* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void AliasedArgumentsEntryPrint() {
-    AliasedArgumentsEntryPrint(stdout);
-  }
-  void AliasedArgumentsEntryPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(AliasedArgumentsEntry)
   DECLARE_VERIFIER(AliasedArgumentsEntry)
 
   static const int kAliasedContextSlot = HeapObject::kHeaderSize;
@@ -7031,30 +6932,15 @@
  public:
   explicit inline StringHasher(int length, uint32_t seed);
 
-  // Returns true if the hash of this string can be computed without
-  // looking at the contents.
-  inline bool has_trivial_hash();
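+  // Hashes a complete sequential run of characters in one pass; schar is
+  // the raw character type (e.g. char or uint16_t).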
+  template <typename schar>
+  static inline uint32_t HashSequentialString(const schar* chars,
+                                              int length,
+                                              uint32_t seed);
 
-  // Add a character to the hash and update the array index calculation.
-  inline void AddCharacter(uint32_t c);
-
-  // Adds a character to the hash but does not update the array index
-  // calculation.  This can only be called when it has been verified
-  // that the input is not an array index.
-  inline void AddCharacterNoIndex(uint32_t c);
-
-  // Add a character above 0xffff as a surrogate pair.  These can get into
-  // the hasher through the routines that take a UTF-8 string and make a symbol.
-  void AddSurrogatePair(uc32 c);
-  void AddSurrogatePairNoIndex(uc32 c);
-
-  // Returns the value to store in the hash field of a string with
-  // the given length and contents.
-  uint32_t GetHashField();
-
-  // Returns true if the characters seen so far make up a legal array
-  // index.
-  bool is_array_index() { return is_array_index_; }
+  // Reads all the data, even for long strings, and computes the utf16 length.
+  static uint32_t ComputeUtf8Hash(Vector<const char> chars,
+                                  uint32_t seed,
+                                  int* utf16_length_out);
 
   // Calculated hash value for a string consisting of 1 to
   // String::kMaxArrayIndexSize digits with no leading zeros (except "0").
@@ -7066,51 +6952,36 @@
   // use 27 instead.
   static const int kZeroHash = 27;
 
- private:
-  uint32_t array_index() {
-    ASSERT(is_array_index());
-    return array_index_;
-  }
-
-  inline uint32_t GetHash();
-
   // Reusable parts of the hashing algorithm.
-  INLINE(static uint32_t AddCharacterCore(uint32_t running_hash, uint32_t c));
+  INLINE(static uint32_t AddCharacterCore(uint32_t running_hash, uint16_t c));
   INLINE(static uint32_t GetHashCore(uint32_t running_hash));
 
+ protected:
+  // Returns the value to store in the hash field of a string with
+  // the given length and contents.
+  uint32_t GetHashField();
+  // Returns true if the hash of this string can be computed without
+  // looking at the contents.
+  inline bool has_trivial_hash();
+  // Adds a block of characters to the hash.
+  template<typename Char>
+  inline void AddCharacters(const Char* chars, int len);
+
+ private:
+  // Add a character to the hash.
+  inline void AddCharacter(uint16_t c);
+  // Updates the array-index computation. Returns true if the string could
+  // still be an array index.
+  inline bool UpdateIndex(uint16_t c);
+
   int length_;
   uint32_t raw_running_hash_;
   uint32_t array_index_;
   bool is_array_index_;
   bool is_first_char_;
-  friend class TwoCharHashTableKey;
-
-  template <bool seq_ascii> friend class JsonParser;
+  DISALLOW_COPY_AND_ASSIGN(StringHasher);
 };
 
 
-class IncrementalAsciiStringHasher {
- public:
-  explicit inline IncrementalAsciiStringHasher(uint32_t seed, char first_char);
-  inline void AddCharacter(uc32 c);
-  inline uint32_t GetHash();
-
- private:
-  int length_;
-  uint32_t raw_running_hash_;
-  uint32_t array_index_;
-  bool is_array_index_;
-  char first_char_;
-};
-
-
-// Calculates string hash.
-template <typename schar>
-inline uint32_t HashSequentialString(const schar* chars,
-                                     int length,
-                                     uint32_t seed);
-
-
 // The characteristics of a string are stored in its map.  Retrieving these
 // few bits of information is moderately expensive, involving two memory
 // loads where the second is dependent on the first.  To improve efficiency
@@ -7329,10 +7200,6 @@
   // Returns a hash value used for the property table
   inline uint32_t Hash();
 
-  static uint32_t ComputeHashField(unibrow::CharacterStream* buffer,
-                                   int length,
-                                   uint32_t seed);
-
   static bool ComputeArrayIndex(unibrow::CharacterStream* buffer,
                                 uint32_t* index,
                                 int length);
@@ -7514,7 +7381,7 @@
   static inline void Visit(String* string,
                            unsigned offset,
                            Visitor& visitor,
-                           ConsOp& consOp,
+                           ConsOp& cons_op,
                            int32_t type,
                            unsigned length);
 
@@ -7972,22 +7839,30 @@
 };
 
 
+// A ConsStringOp that returns null.
+// Useful when the operation to apply to a ConsString
+// requires an expensive data structure.
+class ConsStringNullOp {
+ public:
+  inline ConsStringNullOp() {}
+  static inline String* Operate(String*, unsigned*, int32_t*, unsigned*);
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ConsStringNullOp);
+};
+
+
 // This maintains an off-stack representation of the stack frames required
 // to traverse a ConsString, allowing an entirely iterative and restartable
 // traversal of the entire string.
 // Note: this class is not GC-safe.
 class ConsStringIteratorOp {
  public:
-  struct ContinueResponse {
-    String* string_;
-    unsigned offset_;
-    unsigned length_;
-    int32_t type_;
-  };
   inline ConsStringIteratorOp() {}
-  String* Operate(ConsString* consString, unsigned* outerOffset,
-      int32_t* typeOut, unsigned* lengthOut);
-  inline bool ContinueOperation(ContinueResponse* response);
+  String* Operate(String* string,
+                  unsigned* offset_out,
+                  int32_t* type_out,
+                  unsigned* length_out);
+  inline String* ContinueOperation(int32_t* type_out, unsigned* length_out);
   inline void Reset();
   inline bool HasMore();
 
@@ -7998,25 +7873,23 @@
   static const unsigned kDepthMask = kStackSize-1;
   STATIC_ASSERT(IS_POWER_OF_TWO(kStackSize));
   static inline unsigned OffsetForDepth(unsigned depth);
-  static inline uint32_t MaskForDepth(unsigned depth);
 
-  inline void ClearRightDescent();
-  inline void SetRightDescent();
   inline void PushLeft(ConsString* string);
-  inline void PushRight(ConsString* string, int32_t type);
+  inline void PushRight(ConsString* string);
   inline void AdjustMaximumDepth();
   inline void Pop();
-  inline void ResetStack();
-  String* NextLeaf(bool* blewStack, int32_t* typeOut);
+  String* NextLeaf(bool* blew_stack, int32_t* type_out, unsigned* length_out);
+  String* Search(unsigned* offset_out,
+                 int32_t* type_out,
+                 unsigned* length_out);
 
   unsigned depth_;
   unsigned maximum_depth_;
-  uint32_t trace_;
+  // Stack must always contain only frames for which right traversal
+  // has not yet been performed.
   ConsString* frames_[kStackSize];
   unsigned consumed_;
   ConsString* root_;
-  int32_t root_type_;
-  unsigned root_length_;
   DISALLOW_COPY_AND_ASSIGN(ConsStringIteratorOp);
 };
 
@@ -8024,8 +7897,9 @@
 // Note: this class is not GC-safe.
 class StringCharacterStream {
  public:
-  inline StringCharacterStream(
-      String* string, unsigned offset, ConsStringIteratorOp* op);
+  inline StringCharacterStream(String* string,
+                               unsigned offset,
+                               ConsStringIteratorOp* op);
   inline uint16_t GetNext();
   inline bool HasMore();
   inline void Reset(String* string, unsigned offset, ConsStringIteratorOp* op);
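+  // Typical usage (illustrative only):
+  //   ConsStringIteratorOp op;
+  //   StringCharacterStream stream(string, 0, &op);
+  //   while (stream.HasMore()) Process(stream.GetNext());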
@@ -8124,15 +7998,10 @@
     return address() + kValueOffset;
   }
 
+  // Dispatched behavior.
+  DECLARE_PRINTER(JSGlobalPropertyCell)
   DECLARE_VERIFIER(JSGlobalPropertyCell)
 
-#ifdef OBJECT_PRINT
-  inline void JSGlobalPropertyCellPrint() {
-    JSGlobalPropertyCellPrint(stdout);
-  }
-  void JSGlobalPropertyCellPrint(FILE* out);
-#endif
-
   // Layout description.
   static const int kValueOffset = HeapObject::kHeaderSize;
   static const int kSize = kValueOffset + kPointerSize;
@@ -8222,12 +8091,7 @@
                           Handle<Object> args[]);
 
   // Dispatched behavior.
-#ifdef OBJECT_PRINT
-  inline void JSProxyPrint() {
-    JSProxyPrint(stdout);
-  }
-  void JSProxyPrint(FILE* out);
-#endif
+  DECLARE_PRINTER(JSProxy)
   DECLARE_VERIFIER(JSProxy)
 
   // Layout description. We add padding so that a proxy has the same
@@ -8263,12 +8127,7 @@
   static inline JSFunctionProxy* cast(Object* obj);
 
   // Dispatched behavior.
-#ifdef OBJECT_PRINT
-  inline void JSFunctionProxyPrint() {
-    JSFunctionProxyPrint(stdout);
-  }
-  void JSFunctionProxyPrint(FILE* out);
-#endif
+  DECLARE_PRINTER(JSFunctionProxy)
   DECLARE_VERIFIER(JSFunctionProxy)
 
   // Layout description.
@@ -8298,12 +8157,8 @@
   // Casting.
   static inline JSSet* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void JSSetPrint() {
-    JSSetPrint(stdout);
-  }
-  void JSSetPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(JSSet)
   DECLARE_VERIFIER(JSSet)
 
   static const int kTableOffset = JSObject::kHeaderSize;
@@ -8323,12 +8178,8 @@
   // Casting.
   static inline JSMap* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void JSMapPrint() {
-    JSMapPrint(stdout);
-  }
-  void JSMapPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(JSMap)
   DECLARE_VERIFIER(JSMap)
 
   static const int kTableOffset = JSObject::kHeaderSize;
@@ -8351,12 +8202,8 @@
   // Casting.
   static inline JSWeakMap* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void JSWeakMapPrint() {
-    JSWeakMapPrint(stdout);
-  }
-  void JSWeakMapPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(JSWeakMap)
   DECLARE_VERIFIER(JSWeakMap)
 
   static const int kTableOffset = JSObject::kHeaderSize;
@@ -8386,12 +8233,8 @@
   template<typename StaticVisitor>
   inline void ForeignIterateBody();
 
-#ifdef OBJECT_PRINT
-  inline void ForeignPrint() {
-    ForeignPrint(stdout);
-  }
-  void ForeignPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(Foreign)
   DECLARE_VERIFIER(Foreign)
 
   // Layout description.
@@ -8444,12 +8287,7 @@
   inline void EnsureSize(int minimum_size_of_backing_fixed_array);
 
   // Dispatched behavior.
-#ifdef OBJECT_PRINT
-  inline void JSArrayPrint() {
-    JSArrayPrint(stdout);
-  }
-  void JSArrayPrint(FILE* out);
-#endif
+  DECLARE_PRINTER(JSArray)
   DECLARE_VERIFIER(JSArray)
 
   // Number of element slots to pre-allocate for an empty array.
@@ -8523,12 +8361,8 @@
 
   static inline AccessorInfo* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void AccessorInfoPrint() {
-    AccessorInfoPrint(stdout);
-  }
-  void AccessorInfoPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(AccessorInfo)
   DECLARE_VERIFIER(AccessorInfo)
 
   static const int kGetterOffset = HeapObject::kHeaderSize;
@@ -8590,9 +8424,8 @@
     return IsJSAccessor(getter()) || IsJSAccessor(setter());
   }
 
-#ifdef OBJECT_PRINT
-  void AccessorPairPrint(FILE* out = stdout);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(AccessorPair)
   DECLARE_VERIFIER(AccessorPair)
 
   static const int kGetterOffset = HeapObject::kHeaderSize;
@@ -8621,12 +8454,8 @@
 
   static inline AccessCheckInfo* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void AccessCheckInfoPrint() {
-    AccessCheckInfoPrint(stdout);
-  }
-  void AccessCheckInfoPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(AccessCheckInfo)
   DECLARE_VERIFIER(AccessCheckInfo)
 
   static const int kNamedCallbackOffset   = HeapObject::kHeaderSize;
@@ -8650,12 +8479,8 @@
 
   static inline InterceptorInfo* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void InterceptorInfoPrint() {
-    InterceptorInfoPrint(stdout);
-  }
-  void InterceptorInfoPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(InterceptorInfo)
   DECLARE_VERIFIER(InterceptorInfo)
 
   static const int kGetterOffset = HeapObject::kHeaderSize;
@@ -8678,12 +8503,8 @@
 
   static inline CallHandlerInfo* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void CallHandlerInfoPrint() {
-    CallHandlerInfoPrint(stdout);
-  }
-  void CallHandlerInfoPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(CallHandlerInfo)
   DECLARE_VERIFIER(CallHandlerInfo)
 
   static const int kCallbackOffset = HeapObject::kHeaderSize;
@@ -8727,6 +8548,9 @@
   DECL_ACCESSORS(access_check_info, Object)
   DECL_ACCESSORS(flag, Smi)
 
+  inline int length();
+  inline void set_length(int value);
+
   // Following properties use flag bits.
   DECL_BOOLEAN_ACCESSORS(hidden_prototype)
   DECL_BOOLEAN_ACCESSORS(undetectable)
@@ -8737,12 +8561,8 @@
 
   static inline FunctionTemplateInfo* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void FunctionTemplateInfoPrint() {
-    FunctionTemplateInfoPrint(stdout);
-  }
-  void FunctionTemplateInfoPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(FunctionTemplateInfo)
   DECLARE_VERIFIER(FunctionTemplateInfo)
 
   static const int kSerialNumberOffset = TemplateInfo::kHeaderSize;
@@ -8764,7 +8584,8 @@
   static const int kAccessCheckInfoOffset =
       kInstanceCallHandlerOffset + kPointerSize;
   static const int kFlagOffset = kAccessCheckInfoOffset + kPointerSize;
-  static const int kSize = kFlagOffset + kPointerSize;
+  static const int kLengthOffset = kFlagOffset + kPointerSize;
+  static const int kSize = kLengthOffset + kPointerSize;
 
  private:
   // Bit position in the flag, from least significant bit position.
@@ -8784,12 +8605,8 @@
 
   static inline ObjectTemplateInfo* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void ObjectTemplateInfoPrint() {
-    ObjectTemplateInfoPrint(stdout);
-  }
-  void ObjectTemplateInfoPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(ObjectTemplateInfo)
   DECLARE_VERIFIER(ObjectTemplateInfo)
 
   static const int kConstructorOffset = TemplateInfo::kHeaderSize;
@@ -8806,12 +8623,8 @@
 
   static inline SignatureInfo* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void SignatureInfoPrint() {
-    SignatureInfoPrint(stdout);
-  }
-  void SignatureInfoPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(SignatureInfo)
   DECLARE_VERIFIER(SignatureInfo)
 
   static const int kReceiverOffset = Struct::kHeaderSize;
@@ -8829,12 +8642,8 @@
 
   static inline TypeSwitchInfo* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void TypeSwitchInfoPrint() {
-    TypeSwitchInfoPrint(stdout);
-  }
-  void TypeSwitchInfoPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(TypeSwitchInfo)
   DECLARE_VERIFIER(TypeSwitchInfo)
 
   static const int kTypesOffset = Struct::kHeaderSize;
@@ -8879,12 +8688,8 @@
 
   static inline DebugInfo* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void DebugInfoPrint() {
-    DebugInfoPrint(stdout);
-  }
-  void DebugInfoPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(DebugInfo)
   DECLARE_VERIFIER(DebugInfo)
 
   static const int kSharedFunctionInfoIndex = Struct::kHeaderSize;
@@ -8935,12 +8740,8 @@
 
   static inline BreakPointInfo* cast(Object* obj);
 
-#ifdef OBJECT_PRINT
-  inline void BreakPointInfoPrint() {
-    BreakPointInfoPrint(stdout);
-  }
-  void BreakPointInfoPrint(FILE* out);
-#endif
+  // Dispatched behavior.
+  DECLARE_PRINTER(BreakPointInfo)
   DECLARE_VERIFIER(BreakPointInfo)
 
   static const int kCodePositionIndex = Struct::kHeaderSize;
diff --git a/src/optimizing-compiler-thread.h b/src/optimizing-compiler-thread.h
index 2d56d1a..7aad78c 100644
--- a/src/optimizing-compiler-thread.h
+++ b/src/optimizing-compiler-thread.h
@@ -36,7 +36,7 @@
 namespace v8 {
 namespace internal {
 
-class HGraphBuilder;
+class HOptimizedGraphBuilder;
 class OptimizingCompiler;
 class SharedFunctionInfo;
 
diff --git a/src/parser.cc b/src/parser.cc
index 94d2f9e..6365e41 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -254,7 +254,7 @@
   if (static_cast<unsigned>(symbol_id)
       >= static_cast<unsigned>(symbol_cache_.length())) {
     if (scanner().is_literal_ascii()) {
-      return isolate()->factory()->LookupAsciiSymbol(
+      return isolate()->factory()->LookupOneByteSymbol(
           scanner().literal_ascii_string());
     } else {
       return isolate()->factory()->LookupTwoByteSymbol(
@@ -275,7 +275,7 @@
   Handle<String> result = symbol_cache_.at(symbol_id);
   if (result.is_null()) {
     if (scanner().is_literal_ascii()) {
-      result = isolate()->factory()->LookupAsciiSymbol(
+      result = isolate()->factory()->LookupOneByteSymbol(
           scanner().literal_ascii_string());
     } else {
       result = isolate()->factory()->LookupTwoByteSymbol(
@@ -3006,6 +3006,7 @@
   // side expression.  We could report this as a syntax error here but
   // for compatibility with JSC we choose to report the error at
   // runtime.
+  // TODO(ES5): Should change parsing for spec conformance.
   if (expression == NULL || !expression->IsValidLeftHandSide()) {
     Handle<String> type =
         isolate()->factory()->invalid_lhs_in_assignment_symbol();
@@ -3988,7 +3989,7 @@
       next == Token::STRING || is_keyword) {
     Handle<String> name;
     if (is_keyword) {
-      name = isolate_->factory()->LookupAsciiSymbol(Token::String(next));
+      name = isolate_->factory()->LookupUtf8Symbol(Token::String(next));
     } else {
       name = GetSymbol(CHECK_OK);
     }
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 8c5e5b9..3456d32 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -660,7 +660,7 @@
       if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
         SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
       } else {
-        if (rate_limiter_.SuspendIfNecessary()) continue;
+        if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
       }
       OS::Sleep(interval_);
     }
@@ -708,7 +708,6 @@
   }
 
   const int interval_;
-  RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
   static Mutex* mutex_;
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index d02d668..62653b4 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -198,6 +198,31 @@
 }
 
 
+void OS::DumpBacktrace() {
+  void* trace[100];
+  int size = backtrace(trace, ARRAY_SIZE(trace));
+  char** symbols = backtrace_symbols(trace, size);
+  fprintf(stderr, "\n==== C stack trace ===============================\n\n");
+  if (size == 0) {
+    fprintf(stderr, "(empty)\n");
+  } else if (symbols == NULL) {
+    fprintf(stderr, "(no symbols)\n");
+  } else {
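+    // Start at 1 to skip the topmost frame, which is DumpBacktrace itself.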
+    for (int i = 1; i < size; ++i) {
+      fprintf(stderr, "%2d: ", i);
+      char mangled[201];
+      if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) {  // NOLINT
+        fprintf(stderr, "%s\n", mangled);
+      } else {
+        fprintf(stderr, "??\n");
+      }
+    }
+  }
+  fflush(stderr);
+  free(symbols);
+}
+
+
 class PosixMemoryMappedFile : public OS::MemoryMappedFile {
  public:
   PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -767,7 +792,7 @@
       if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
         SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
       } else {
-        if (rate_limiter_.SuspendIfNecessary()) continue;
+        if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
       }
       Sleep();  // TODO(svenpanne) Figure out if OS::Sleep(interval_) is enough.
     }
@@ -802,7 +827,6 @@
   }
 
   const int interval_;
-  RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
   static Mutex* mutex_;
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 7a18641..ce45bab 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -38,6 +38,11 @@
 #include <sys/types.h>
 #include <stdlib.h>
 
+#if defined(__GLIBC__)
+#include <execinfo.h>
+#include <cxxabi.h>
+#endif
+
 // Ubuntu Dapper requires memory pages to be marked as
 // executable. Otherwise, OS raises an exception when executing code
 // in that page.
@@ -415,6 +420,37 @@
 }
 
 
+void OS::DumpBacktrace() {
+#if defined(__GLIBC__)
+  void* trace[100];
+  int size = backtrace(trace, ARRAY_SIZE(trace));
+  char** symbols = backtrace_symbols(trace, size);
+  fprintf(stderr, "\n==== C stack trace ===============================\n\n");
+  if (size == 0) {
+    fprintf(stderr, "(empty)\n");
+  } else if (symbols == NULL) {
+    fprintf(stderr, "(no symbols)\n");
+  } else {
+    for (int i = 1; i < size; ++i) {
+      fprintf(stderr, "%2d: ", i);
+      char mangled[201];
+      if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) {  // NOLINT
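+        // glibc's backtrace_symbols() formats frames roughly as
+        // "module(mangled+0xoff) [0xaddr]": the pattern skips up to '(' and
+        // captures at most 200 chars of the mangled name, stopping at ')' or
+        // '+'. __cxa_demangle then rewrites it, e.g.
+        // "_ZN2v88internal4HeapD1Ev" -> "v8::internal::Heap::~Heap()".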
+        int status;
+        size_t length;
+        char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
+        fprintf(stderr, "%s\n", demangled ? demangled : mangled);
+        free(demangled);
+      } else {
+        fprintf(stderr, "??\n");
+      }
+    }
+  }
+  fflush(stderr);
+  free(symbols);
+#endif
+}
+
+
 class PosixMemoryMappedFile : public OS::MemoryMappedFile {
  public:
   PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -1153,7 +1189,7 @@
         SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
       } else {
         if (signal_handler_installed_) RestoreSignalHandler();
-        if (rate_limiter_.SuspendIfNecessary()) continue;
+        if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
       }
       Sleep();  // TODO(svenpanne) Figure out if OS::Sleep(interval_) is enough.
     }
@@ -1198,7 +1234,6 @@
 
   const int vm_tgid_;
   const int interval_;
-  RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
   static Mutex* mutex_;
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index e698338..899405e 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -182,6 +182,11 @@
 }
 
 
+void OS::DumpBacktrace() {
+  // Currently unsupported.
+}
+
+
 class PosixMemoryMappedFile : public OS::MemoryMappedFile {
  public:
   PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -792,7 +797,7 @@
       if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
         SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
       } else {
-        if (rate_limiter_.SuspendIfNecessary()) continue;
+        if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
       }
       OS::Sleep(interval_);
     }
@@ -851,7 +856,6 @@
   }
 
   const int interval_;
-  RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
   static Mutex* mutex_;
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index ccd2123..f5ae5ec 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -277,6 +277,11 @@
 }
 
 
+void OS::DumpBacktrace() {
+  // Currently unsupported.
+}
+
+
 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
   UNIMPLEMENTED();
   return NULL;
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index d4ab9a6..7fdd5b2 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -215,6 +215,11 @@
 }
 
 
+void OS::DumpBacktrace() {
+  // Currently unsupported.
+}
+
+
 class PosixMemoryMappedFile : public OS::MemoryMappedFile {
  public:
   PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -846,7 +851,7 @@
         SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
       } else {
         if (signal_handler_installed_) RestoreSignalHandler();
-        if (rate_limiter_.SuspendIfNecessary()) continue;
+        if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
       }
       Sleep();  // TODO(svenpanne) Figure out if OS::Sleep(interval_) is enough.
     }
@@ -882,7 +887,6 @@
 
   const int vm_tgid_;
   const int interval_;
-  RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
   static Mutex* mutex_;
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 70f8659..9724cd1 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -202,6 +202,11 @@
 }
 
 
+void OS::DumpBacktrace() {
+  // Currently unsupported.
+}
+
+
 class PosixMemoryMappedFile : public OS::MemoryMappedFile {
  public:
   PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -762,7 +767,7 @@
         SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
       } else {
         if (signal_handler_installed_) RestoreSignalHandler();
-        if (rate_limiter_.SuspendIfNecessary()) continue;
+        if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
       }
       Sleep();  // TODO(svenpanne) Figure out if OS::Sleep(interval_) is enough.
     }
@@ -797,7 +802,6 @@
   }
 
   const int interval_;
-  RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
   static Mutex* mutex_;
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index f00d6b0..0f0143a 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -987,6 +987,11 @@
 }
 
 
+void OS::DumpBacktrace() {
+  // Currently unsupported.
+}
+
+
 class Win32MemoryMappedFile : public OS::MemoryMappedFile {
  public:
   Win32MemoryMappedFile(HANDLE file,
@@ -2015,7 +2020,7 @@
       if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
         SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
       } else {
-        if (rate_limiter_.SuspendIfNecessary()) continue;
+        if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
       }
       OS::Sleep(interval_);
     }
@@ -2063,7 +2068,6 @@
   }
 
   const int interval_;
-  RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
   static Mutex* mutex_;
diff --git a/src/platform.h b/src/platform.h
index 6f75ca8..a1036c4 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -245,6 +245,9 @@
   // Debug break.
   static void DebugBreak();
 
+  // Dump the current C++ stack trace (currently implemented only on Linux
+  // and FreeBSD; a no-op on other platforms).
+  static void DumpBacktrace();
+
   // Walk the stack.
   static const int kStackWalkError = -1;
   static const int kStackWalkMaxNameLen = 256;
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 602fbb4..c0fdf48 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -42,6 +42,7 @@
   output_ = NULL;
   size_ = 0;
   pos_ = 0;
+  InitializeAstVisitor();
 }
 
 
diff --git a/src/prettyprinter.h b/src/prettyprinter.h
index 9ac7257..41175ab 100644
--- a/src/prettyprinter.h
+++ b/src/prettyprinter.h
@@ -74,6 +74,8 @@
   void PrintDeclarations(ZoneList<Declaration*>* declarations);
   void PrintFunctionLiteral(FunctionLiteral* function);
   void PrintCaseClause(CaseClause* clause);
+
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
 };
 
 
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 6e49c7b..e14e0f3 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -112,7 +112,7 @@
   OS::StrNCpy(dst, src, len);
   dst[len] = '\0';
   uint32_t hash =
-      HashSequentialString(dst.start(), len, HEAP->HashSeed());
+      StringHasher::HashSequentialString(dst.start(), len, HEAP->HashSeed());
   return AddOrDisposeString(dst.start(), hash);
 }
 
@@ -145,7 +145,7 @@
     DeleteArray(str.start());
     return format;
   }
-  uint32_t hash = HashSequentialString(
+  uint32_t hash = StringHasher::HashSequentialString(
       str.start(), len, HEAP->HashSeed());
   return AddOrDisposeString(str.start(), hash);
 }
@@ -156,8 +156,8 @@
     int length = Min(kMaxNameSize, name->length());
     SmartArrayPointer<char> data =
         name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length);
-    uint32_t hash =
-        HashSequentialString(*data, length, name->GetHeap()->HashSeed());
+    uint32_t hash = StringHasher::HashSequentialString(
+        *data, length, name->GetHeap()->HashSeed());
     return AddOrDisposeString(data.Detach(), hash);
   }
   return "";
@@ -1451,9 +1451,9 @@
 SnapshotObjectId HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
   SnapshotObjectId id = static_cast<SnapshotObjectId>(info->GetHash());
   const char* label = info->GetLabel();
-  id ^= HashSequentialString(label,
-                             static_cast<int>(strlen(label)),
-                             HEAP->HashSeed());
+  id ^= StringHasher::HashSequentialString(label,
+                                           static_cast<int>(strlen(label)),
+                                           HEAP->HashSeed());
   intptr_t element_count = info->GetElementCount();
   if (element_count != -1)
     id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count),
@@ -2708,10 +2708,6 @@
   Isolate* isolate = Isolate::Current();
   GlobalObjectsEnumerator enumerator;
   isolate->global_handles()->IterateAllRoots(&enumerator);
-  Handle<String> document_string =
-      isolate->factory()->NewStringFromAscii(CStrVector("document"));
-  Handle<String> url_string =
-      isolate->factory()->NewStringFromAscii(CStrVector("URL"));
   const char** urls = NewArray<const char*>(enumerator.count());
   for (int i = 0, l = enumerator.count(); i < l; ++i) {
     if (global_object_name_resolver_) {
@@ -2720,25 +2716,7 @@
       urls[i] = global_object_name_resolver_->GetName(
           Utils::ToLocal(Handle<JSObject>::cast(global_obj)));
     } else {
-      // TODO(yurys): This branch is going to be removed once Chromium migrates
-      // to the new name resolver.
       urls[i] = NULL;
-      HandleScope scope;
-      Handle<JSGlobalObject> global_obj = enumerator.at(i);
-      Object* obj_document;
-      if (global_obj->GetProperty(*document_string)->ToObject(&obj_document) &&
-          obj_document->IsJSObject()) {
-        // FixMe: Workaround: SharedWorker's current Isolate has NULL context.
-        // As result GetProperty(*url_string) will crash.
-        if (!Isolate::Current()->context() && obj_document->IsJSGlobalProxy())
-          continue;
-        JSObject* document = JSObject::cast(obj_document);
-        Object* obj_url;
-        if (document->GetProperty(*url_string)->ToObject(&obj_url) &&
-            obj_url->IsString()) {
-          urls[i] = collection_->names()->GetName(String::cast(obj_url));
-        }
-      }
     }
   }
 
@@ -2962,9 +2940,10 @@
 NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
     const char* label) {
   const char* label_copy = collection_->names()->GetCopy(label);
-  uint32_t hash = HashSequentialString(label_copy,
-                                       static_cast<int>(strlen(label_copy)),
-                                       HEAP->HashSeed());
+  uint32_t hash = StringHasher::HashSequentialString(
+      label_copy,
+      static_cast<int>(strlen(label_copy)),
+      HEAP->HashSeed());
   HashMap::Entry* entry = native_groups_.Lookup(const_cast<char*>(label_copy),
                                                 hash, true);
   if (entry->value == NULL) {
diff --git a/src/property.h b/src/property.h
index c41c6dc..1cadd57 100644
--- a/src/property.h
+++ b/src/property.h
@@ -310,6 +310,26 @@
     return IsFound() && !IsTransition();
   }
 
+  bool IsDataProperty() {
+    switch (type()) {
+      case FIELD:
+      case NORMAL:
+      case CONSTANT_FUNCTION:
+        return true;
+      case CALLBACKS: {
+        Object* callback = GetCallbackObject();
+        return callback->IsAccessorInfo() || callback->IsForeign();
+      }
+      case HANDLER:
+      case INTERCEPTOR:
+      case TRANSITION:
+      case NONEXISTENT:
+        return false;
+    }
+    UNREACHABLE();
+    return false;
+  }
+
   bool IsCacheable() { return cacheable_; }
   void DisallowCaching() { cacheable_ = false; }
 
@@ -327,9 +347,15 @@
       }
       case CONSTANT_FUNCTION:
         return GetConstantFunction();
-      default:
+      case CALLBACKS:
+      case HANDLER:
+      case INTERCEPTOR:
+      case TRANSITION:
+      case NONEXISTENT:
         return Isolate::Current()->heap()->the_hole_value();
     }
+    UNREACHABLE();
+    return NULL;
   }
 
   Map* GetTransitionTarget() {
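
IsDataProperty() and the rewritten GetLazyValue() above spell out every PropertyType case instead of using a default: clause. The point is compile-time coverage: with an exhaustive switch, adding a new enum value makes the compiler (under -Wswitch) flag every switch that was not updated, while a default: clause would silently swallow it. A tiny sketch of the pattern, with hypothetical names:

    // Sketch of the exhaustive-switch idiom; Color and UNREACHABLE() are
    // stand-ins, not V8 declarations.
    #include <cassert>

    #define UNREACHABLE() assert(false)

    enum Color { RED, GREEN, BLUE };

    bool IsWarm(Color c) {
      switch (c) {
        case RED:
          return true;
        case GREEN:
        case BLUE:
          return false;
        // No default: adding a Color makes -Wswitch flag this switch.
      }
      UNREACHABLE();  // Keeps "control reaches end" warnings quiet.
      return false;
    }
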
diff --git a/src/rewriter.cc b/src/rewriter.cc
index 2a98787..02907de 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -43,7 +43,9 @@
         result_assigned_(false),
         is_set_(false),
         in_try_(false),
-        factory_(isolate(), zone) { }
+        factory_(Isolate::Current(), zone) {
+    InitializeAstVisitor();
+  }
 
   virtual ~Processor() { }
 
@@ -86,6 +88,8 @@
 #undef DEF_VISIT
 
   void VisitIterationStatement(IterationStatement* stmt);
+
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
 };
 
 
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 262cd1d..94a5650 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -430,11 +430,6 @@
 }
 
 
-bool RuntimeProfiler::IsSomeIsolateInJS() {
-  return NoBarrier_Load(&state_) > 0;
-}
-
-
 bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() {
   Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
   ASSERT(old_state >= -1);
@@ -484,12 +479,4 @@
 }
 
 
-bool RuntimeProfilerRateLimiter::SuspendIfNecessary() {
-  if (!RuntimeProfiler::IsSomeIsolateInJS()) {
-    return RuntimeProfiler::WaitForSomeIsolateToEnterJS();
-  }
-  return false;
-}
-
-
 } }  // namespace v8::internal
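
The deleted SuspendIfNecessary() only checked IsSomeIsolateInJS() before calling WaitForSomeIsolateToEnterJS(); since the compare-and-swap inside the wait function performs that check atomically anyway, the sampler threads above now call it directly. A rough sketch of the CAS-and-wait shape, using std::atomic purely for illustration (V8 uses its own Atomic32 primitives and semaphores; state > 0 means that many isolates are in JS, 0 means none, -1 means the profiler thread is parked):

    // Illustrative shape only -- not V8's implementation.
    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    static std::atomic<int> state(0);
    static std::mutex mu;
    static std::condition_variable cv;

    // Returns true if the thread actually waited (the caller must then
    // re-check whether profiling is still enabled), false if some isolate
    // was already running JS.
    bool WaitForSomeIsolateToEnterJS() {
      int expected = 0;
      // Park only if nothing is in JS; the CAS makes the check atomic.
      if (!state.compare_exchange_strong(expected, -1)) return false;
      std::unique_lock<std::mutex> lock(mu);
      cv.wait(lock, [] { return state.load() >= 0; });
      return true;
    }

    void IsolateEnteredJS() {
      if (state.fetch_add(1) == -1) {
        std::lock_guard<std::mutex> lock(mu);  // Avoid a lost wakeup.
        state.fetch_add(1);  // Was parked (-1): count this isolate as 1.
        cv.notify_one();     // Wake the profiler thread.
      }
    }
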
diff --git a/src/runtime-profiler.h b/src/runtime-profiler.h
index 507535f..62c48c7 100644
--- a/src/runtime-profiler.h
+++ b/src/runtime-profiler.h
@@ -71,17 +71,12 @@
 
   // Profiler thread interface.
   //
-  // IsSomeIsolateInJS():
-  // The profiler thread can query whether some isolate is currently
-  // running JavaScript code.
-  //
   // WaitForSomeIsolateToEnterJS():
   // When no isolates are running JavaScript code for some time the
   // profiler thread suspends itself by calling the wait function. The
   // wait function returns true after it waited or false immediately.
   // While the function was waiting the profiler may have been
   // disabled so it *must check* whether it is allowed to continue.
-  static bool IsSomeIsolateInJS();
   static bool WaitForSomeIsolateToEnterJS();
 
   // Stops the runtime profiler thread when profiling support is being
@@ -134,24 +129,6 @@
 };
 
 
-// Rate limiter intended to be used in the profiler thread.
-class RuntimeProfilerRateLimiter BASE_EMBEDDED {
- public:
-  RuntimeProfilerRateLimiter() {}
-
-  // Suspends the current thread (which must be the profiler thread)
-  // when not executing JavaScript to minimize CPU usage. Returns
-  // whether the thread was suspended (and so must check whether
-  // profiling is still active.)
-  //
-  // Does nothing when runtime profiling is not enabled.
-  bool SuspendIfNecessary();
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(RuntimeProfilerRateLimiter);
-};
-
-
 // Implementation of RuntimeProfiler inline functions.
 
 void RuntimeProfiler::IsolateEnteredJS(Isolate* isolate) {
diff --git a/src/runtime.cc b/src/runtime.cc
index 4464431..f6c04dc 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1843,7 +1843,7 @@
                                          Handle<JSObject> holder,
                                          const char* name,
                                          Builtins::Name builtin_name) {
-  Handle<String> key = isolate->factory()->LookupAsciiSymbol(name);
+  Handle<String> key = isolate->factory()->LookupUtf8Symbol(name);
   Handle<Code> code(isolate->builtins()->builtin(builtin_name));
   Handle<JSFunction> optimized =
       isolate->factory()->NewFunction(key,
@@ -4376,6 +4376,7 @@
   Handle<Object> object = args.at<Object>(0);
   if (object->IsJSObject()) {
     Handle<JSObject> js_object(Handle<JSObject>::cast(object));
+    ASSERT(!js_object->map()->is_observed());
     ElementsKind new_kind = js_object->HasFastHoleyElements()
         ? FAST_HOLEY_DOUBLE_ELEMENTS
         : FAST_DOUBLE_ELEMENTS;
@@ -4392,6 +4393,7 @@
   Handle<Object> object = args.at<Object>(0);
   if (object->IsJSObject()) {
     Handle<JSObject> js_object(Handle<JSObject>::cast(object));
+    ASSERT(!js_object->map()->is_observed());
     ElementsKind new_kind = js_object->HasFastHoleyElements()
         ? FAST_HOLEY_ELEMENTS
         : FAST_ELEMENTS;
@@ -7842,8 +7844,8 @@
   HandleScope handle_scope(isolate);
   ASSERT(FLAG_parallel_recompilation && FLAG_manual_parallel_recompilation);
   if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
-    return isolate->Throw(
-        *isolate->factory()->LookupAsciiSymbol("Recompile queue is full."));
+    return isolate->Throw(*isolate->factory()->LookupOneByteSymbol(
+        STATIC_ASCII_VECTOR("Recompile queue is full.")));
   }
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
   fun->ReplaceCode(isolate->builtins()->builtin(Builtins::kParallelRecompile));
@@ -7889,6 +7891,17 @@
 };
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyICMiss) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 0);
+  Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
+  ASSERT(isolate->heap()->IsAllocationAllowed());
+  ASSERT(deoptimizer->compiled_code_kind() == Code::COMPILED_STUB);
+  delete deoptimizer;
+  return isolate->heap()->undefined_value();
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
@@ -7897,9 +7910,11 @@
       static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
   Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
   ASSERT(isolate->heap()->IsAllocationAllowed());
-  JavaScriptFrameIterator it(isolate);
+
+  ASSERT(deoptimizer->compiled_code_kind() != Code::COMPILED_STUB);
 
   // Make sure to materialize objects before causing any allocation.
+  JavaScriptFrameIterator it(isolate);
   deoptimizer->MaterializeHeapObjects(&it);
   delete deoptimizer;
 
@@ -10798,6 +10813,95 @@
 }
 
 
+// Set the context local variable value.
+static bool SetContextLocalValue(Isolate* isolate,
+                                 Handle<ScopeInfo> scope_info,
+                                 Handle<Context> context,
+                                 Handle<String> variable_name,
+                                 Handle<Object> new_value) {
+  for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
+    Handle<String> next_name(scope_info->ContextLocalName(i));
+    if (variable_name->Equals(*next_name)) {
+      VariableMode mode;
+      InitializationFlag init_flag;
+      int context_index =
+          scope_info->ContextSlotIndex(*next_name, &mode, &init_flag);
+      context->set(context_index, *new_value);
+      return true;
+    }
+  }
+
+  return false;
+}
+
+
+static bool SetLocalVariableValue(Isolate* isolate,
+                                  JavaScriptFrame* frame,
+                                  int inlined_jsframe_index,
+                                  Handle<String> variable_name,
+                                  Handle<Object> new_value) {
+  if (inlined_jsframe_index != 0 || frame->is_optimized()) {
+    // Optimized frames (and frames inlined into them) are not supported.
+    return false;
+  }
+
+  Handle<JSFunction> function(JSFunction::cast(frame->function()));
+  Handle<SharedFunctionInfo> shared(function->shared());
+  Handle<ScopeInfo> scope_info(shared->scope_info());
+
+  bool default_result = false;
+
+  // Parameters.
+  for (int i = 0; i < scope_info->ParameterCount(); ++i) {
+    if (scope_info->ParameterName(i)->Equals(*variable_name)) {
+      frame->SetParameterValue(i, *new_value);
+      // The argument might be shadowed in the heap context; don't stop here.
+      default_result = true;
+    }
+  }
+
+  // Stack locals.
+  for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
+    if (scope_info->StackLocalName(i)->Equals(*variable_name)) {
+      frame->SetExpression(i, *new_value);
+      return true;
+    }
+  }
+
+  if (scope_info->HasContext()) {
+    // Context locals.
+    Handle<Context> frame_context(Context::cast(frame->context()));
+    Handle<Context> function_context(frame_context->declaration_context());
+    if (SetContextLocalValue(
+        isolate, scope_info, function_context, variable_name, new_value)) {
+      return true;
+    }
+
+    // Function context extension. These are variables introduced by eval.
+    if (function_context->closure() == *function) {
+      if (function_context->has_extension() &&
+          !function_context->IsNativeContext()) {
+        Handle<JSObject> ext(JSObject::cast(function_context->extension()));
+
+        if (ext->HasProperty(*variable_name)) {
+          // We don't expect this to do anything other than replace the
+          // property value.
+          SetProperty(isolate,
+                      ext,
+                      variable_name,
+                      new_value,
+                      NONE,
+                      kNonStrictMode);
+          return true;
+        }
+      }
+    }
+  }
+
+  return default_result;
+}
+
+
 // Create a plain JSObject which materializes the closure content for the
 // context.
 static Handle<JSObject> MaterializeClosure(Isolate* isolate,
@@ -10858,19 +10962,9 @@
   Handle<ScopeInfo> scope_info(shared->scope_info());
 
   // Context locals to the context extension.
-  for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
-    Handle<String> next_name(scope_info->ContextLocalName(i));
-    if (variable_name->Equals(*next_name)) {
-      VariableMode mode;
-      InitializationFlag init_flag;
-      int context_index =
-          scope_info->ContextSlotIndex(*next_name, &mode, &init_flag);
-      if (context_index < 0) {
-        return false;
-      }
-      context->set(context_index, *new_value);
-      return true;
-    }
+  if (SetContextLocalValue(
+          isolate, scope_info, context, variable_name, new_value)) {
+    return true;
   }
 
   // Properties from the function context extension. This will
@@ -10915,6 +11009,20 @@
 }
 
 
+static bool SetCatchVariableValue(Isolate* isolate,
+                                  Handle<Context> context,
+                                  Handle<String> variable_name,
+                                  Handle<Object> new_value) {
+  ASSERT(context->IsCatchContext());
+  Handle<String> name(String::cast(context->extension()));
+  if (!name->Equals(*variable_name)) {
+    return false;
+  }
+  context->set(Context::THROWN_OBJECT_INDEX, *new_value);
+  return true;
+}
+
+
 // Create a plain JSObject which materializes the block scope for the specified
 // block context.
 static Handle<JSObject> MaterializeBlockScope(
@@ -11180,13 +11288,13 @@
       case ScopeIterator::ScopeTypeGlobal:
         break;
       case ScopeIterator::ScopeTypeLocal:
-        // TODO(2399): implement.
-        break;
+        return SetLocalVariableValue(isolate_, frame_, inlined_jsframe_index_,
+            variable_name, new_value);
       case ScopeIterator::ScopeTypeWith:
         break;
       case ScopeIterator::ScopeTypeCatch:
-        // TODO(2399): implement.
-        break;
+        return SetCatchVariableValue(isolate_, CurrentContext(),
+            variable_name, new_value);
       case ScopeIterator::ScopeTypeClosure:
         return SetClosureVariableValue(isolate_, CurrentContext(),
             variable_name, new_value);
@@ -12714,7 +12822,7 @@
   const char* error_message =
       LiveEdit::RestartFrame(it.frame(), isolate->runtime_zone());
   if (error_message) {
-    return *(isolate->factory()->LookupAsciiSymbol(error_message));
+    return *(isolate->factory()->LookupUtf8Symbol(error_message));
   }
   return heap->true_value();
 }
@@ -13098,19 +13206,49 @@
 }
 
 
-// Retrieve the raw stack trace collected on stack overflow and delete
-// it since it is used only once to avoid keeping it alive.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOverflowedRawStackTrace) {
+// Mark a function so that it can be recognized when called after GC as the
+// one-shot getter that formats the stack trace.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MarkOneShotGetter) {
+  ASSERT_EQ(args.length(), 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+  HandleScope scope(isolate);
+  Handle<String> key = isolate->factory()->hidden_stack_trace_symbol();
+  JSObject::SetHiddenProperty(fun, key, key);
+  return *fun;
+}
+
+
+// Retrieve the stack trace.  This could be the raw stack trace collected
+// on stack overflow or the already formatted stack trace string.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOverflowedStackTrace) {
+  HandleScope scope(isolate);
   ASSERT_EQ(args.length(), 1);
   CONVERT_ARG_CHECKED(JSObject, error_object, 0);
   String* key = isolate->heap()->hidden_stack_trace_symbol();
   Object* result = error_object->GetHiddenProperty(key);
-  RUNTIME_ASSERT(result->IsJSArray() || result->IsUndefined());
-  error_object->DeleteHiddenProperty(key);
+  RUNTIME_ASSERT(result->IsJSArray() ||
+                 result->IsString() ||
+                 result->IsUndefined());
   return result;
 }
 
 
+// Set or clear the stack trace attached to a stack overflow error object.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetOverflowedStackTrace) {
+  HandleScope scope(isolate);
+  ASSERT_EQ(args.length(), 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(HeapObject, value, 1);
+  Handle<String> key = isolate->factory()->hidden_stack_trace_symbol();
+  if (value->IsUndefined()) {
+    error_object->DeleteHiddenProperty(*key);
+  } else {
+    RUNTIME_ASSERT(value->IsString());
+    JSObject::SetHiddenProperty(error_object, key, value);
+  }
+  return *error_object;
+}
+
+
 // Returns V8 version as a string.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetV8Version) {
   ASSERT_EQ(args.length(), 0);
@@ -13367,7 +13505,16 @@
     ASSERT(proto->IsJSGlobalObject());
     obj = JSReceiver::cast(proto);
   }
+  ASSERT(!(obj->map()->is_observed() && obj->IsJSObject() &&
+           JSObject::cast(obj)->HasFastElements()));
   if (obj->map()->is_observed() != is_observed) {
+    if (is_observed && obj->IsJSObject() &&
+        !JSObject::cast(obj)->HasExternalArrayElements()) {
+      // Go to dictionary mode, so that we don't skip map checks.
+      MaybeObject* maybe = JSObject::cast(obj)->NormalizeElements();
+      if (maybe->IsFailure()) return maybe;
+      ASSERT(!JSObject::cast(obj)->HasFastElements());
+    }
     MaybeObject* maybe = obj->map()->Copy();
     Map* map;
     if (!maybe->To(&map)) return maybe;
@@ -13452,7 +13599,7 @@
   for (int i = 0; i < kNumFunctions; ++i) {
     Object* name_symbol;
     { MaybeObject* maybe_name_symbol =
-          heap->LookupAsciiSymbol(kIntrinsicFunctions[i].name);
+          heap->LookupUtf8Symbol(kIntrinsicFunctions[i].name);
       if (!maybe_name_symbol->ToObject(&name_symbol)) return maybe_name_symbol;
     }
     StringDictionary* string_dictionary = StringDictionary::cast(dictionary);
diff --git a/src/runtime.h b/src/runtime.h
index c77f988..89c040e 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -89,6 +89,7 @@
   F(ForceParallelRecompile, 1, 1) \
   F(InstallRecompiledCode, 1, 1) \
   F(NotifyDeoptimized, 1, 1) \
+  F(NotifyICMiss, 0, 1) \
   F(NotifyOSR, 0, 1) \
   F(DeoptimizeFunction, 1, 1) \
   F(ClearFunctionTypeFeedback, 1, 1) \
@@ -236,7 +237,9 @@
   F(FunctionIsBuiltin, 1, 1) \
   F(GetScript, 1, 1) \
   F(CollectStackTrace, 3, 1) \
-  F(GetOverflowedRawStackTrace, 1, 1) \
+  F(MarkOneShotGetter, 1, 1) \
+  F(GetOverflowedStackTrace, 1, 1) \
+  F(SetOverflowedStackTrace, 2, 1) \
   F(GetV8Version, 0, 1) \
   \
   F(ClassOf, 1, 1) \
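
The runtime.h hunk grows V8's intrinsic function table, which is an X-macro: each F(name, nargs, ressize) line expands once per use site into a declaration, an enum value, and a table entry, so a single list drives all three. A self-contained miniature of the technique (all names hypothetical):

    // Miniature X-macro table in the style of runtime.h.
    #include <cstdio>

    #define FUNCTION_LIST(F) \
      F(Add, 2)              \
      F(Negate, 1)

    // Expansion 1: one enum id per function.
    #define DECLARE_ID(Name, nargs) k##Name,
    enum FunctionId { FUNCTION_LIST(DECLARE_ID) kNumFunctions };
    #undef DECLARE_ID

    // Expansion 2: a parallel metadata table, kept in sync for free.
    struct Entry { const char* name; int nargs; };
    #define DECLARE_ENTRY(Name, nargs) { #Name, nargs },
    static const Entry kEntries[] = { FUNCTION_LIST(DECLARE_ENTRY) };
    #undef DECLARE_ENTRY

    int main() {
      for (int i = 0; i < kNumFunctions; i++)
        std::printf("%s/%d\n", kEntries[i].name, kEntries[i].nargs);
      return 0;
    }
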
diff --git a/src/safepoint-table.cc b/src/safepoint-table.cc
index 714e5c3..9e42304 100644
--- a/src/safepoint-table.cc
+++ b/src/safepoint-table.cc
@@ -59,7 +59,8 @@
 
 
 SafepointTable::SafepointTable(Code* code) {
-  ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+  ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION ||
+         code->kind() == Code::COMPILED_STUB);
   code_ = code;
   Address header = code->instruction_start() + code->safepoint_table_offset();
   length_ = Memory::uint32_at(header + kLengthOffset);
@@ -158,14 +159,6 @@
 
 
 void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
-  // For lazy deoptimization we need space to patch a call after every call.
-  // Ensure there is always space for such patching, even if the code ends
-  // in a call.
-  int target_offset = assembler->pc_offset() + Deoptimizer::patch_size();
-  while (assembler->pc_offset() < target_offset) {
-    assembler->nop();
-  }
-
   // Make sure the safepoint table is properly aligned. Pad with nops.
   assembler->Align(kIntSize);
   assembler->RecordComment(";;; Safepoint table.");
diff --git a/src/scanner.h b/src/scanner.h
index 4de413b..6036833 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -145,7 +145,7 @@
 // Caching predicates used by scanners.
  public:
   UnicodeCache() {}
-  typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
+  typedef unibrow::Utf8Decoder<512> Utf8Decoder;
 
   StaticResource<Utf8Decoder>* utf8_decoder() {
     return &utf8_decoder_;
@@ -315,8 +315,6 @@
   // -1 is outside of the range of any real source code.
   static const int kNoOctalLocation = -1;
 
-  typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
-
   explicit Scanner(UnicodeCache* scanner_contants);
 
   void Initialize(Utf16CharacterStream* source);
diff --git a/src/scopes.cc b/src/scopes.cc
index 56a922d..e132672 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -308,23 +308,6 @@
   }
 #endif
 
-  if (FLAG_harmony_scoping) {
-    VariableProxy* proxy = scope->CheckAssignmentToConst();
-    if (proxy != NULL) {
-      // Found an assignment to const. Throw a syntax error.
-      MessageLocation location(info->script(),
-                               proxy->position(),
-                               proxy->position());
-      Isolate* isolate = info->isolate();
-      Factory* factory = isolate->factory();
-      Handle<JSArray> array = factory->NewJSArray(0);
-      Handle<Object> result =
-          factory->NewSyntaxError("harmony_const_assign", array);
-      isolate->Throw(*result, &location);
-      return false;
-    }
-  }
-
   info->SetScope(scope);
   return true;
 }
@@ -591,29 +574,6 @@
 }
 
 
-VariableProxy* Scope::CheckAssignmentToConst() {
-  // Check this scope.
-  if (is_extended_mode()) {
-    for (int i = 0; i < unresolved_.length(); i++) {
-      ASSERT(unresolved_[i]->var() != NULL);
-      if (unresolved_[i]->var()->is_const_mode() &&
-          unresolved_[i]->IsLValue()) {
-        return unresolved_[i];
-      }
-    }
-  }
-
-  // Check inner scopes.
-  for (int i = 0; i < inner_scopes_.length(); i++) {
-    VariableProxy* proxy = inner_scopes_[i]->CheckAssignmentToConst();
-    if (proxy != NULL) return proxy;
-  }
-
-  // No assignments to const found.
-  return NULL;
-}
-
-
 class VarAndOrder {
  public:
   VarAndOrder(Variable* var, int order) : var_(var), order_(order) { }
@@ -1102,6 +1062,20 @@
 
   ASSERT(var != NULL);
 
+  if (FLAG_harmony_scoping && is_extended_mode() &&
+      var->is_const_mode() && proxy->IsLValue()) {
+    // Assignment to const. Throw a syntax error.
+    MessageLocation location(
+        info->script(), proxy->position(), proxy->position());
+    Isolate* isolate = Isolate::Current();
+    Factory* factory = isolate->factory();
+    Handle<JSArray> array = factory->NewJSArray(0);
+    Handle<Object> result =
+        factory->NewSyntaxError("harmony_const_assign", array);
+    isolate->Throw(*result, &location);
+    return false;
+  }
+
   if (FLAG_harmony_modules) {
     bool ok;
 #ifdef DEBUG
@@ -1122,9 +1096,8 @@
 
       // Inconsistent use of module. Throw a syntax error.
       // TODO(rossberg): generate more helpful error message.
-      MessageLocation location(info->script(),
-                               proxy->position(),
-                               proxy->position());
+      MessageLocation location(
+          info->script(), proxy->position(), proxy->position());
       Isolate* isolate = Isolate::Current();
       Factory* factory = isolate->factory();
       Handle<JSArray> array = factory->NewJSArray(1);
@@ -1386,9 +1359,8 @@
   if (already_resolved()) return;
   if (is_module_scope()) {
     ASSERT(interface_->IsFrozen());
-    const char raw_name[] = ".module";
-    Handle<String> name = isolate_->factory()->LookupSymbol(
-        Vector<const char>(raw_name, StrLength(raw_name)));
+    Handle<String> name = isolate_->factory()->LookupOneByteSymbol(
+        STATIC_ASCII_VECTOR(".module"));
     ASSERT(module_var_ == NULL);
     module_var_ = host_scope->NewInternal(name);
     ++host_scope->num_modules_;
diff --git a/src/scopes.h b/src/scopes.h
index c60c2e7..3ca2dcf 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -224,11 +224,6 @@
   // scope over a let binding of the same name.
   Declaration* CheckConflictingVarDeclarations();
 
-  // For harmony block scoping mode: Check if the scope has variable proxies
-  // that are used as lvalues and point to const variables. Assumes that scopes
-  // have been analyzed and variables been resolved.
-  VariableProxy* CheckAssignmentToConst();
-
   // ---------------------------------------------------------------------------
   // Scope-specific info.
 
diff --git a/src/serialize.cc b/src/serialize.cc
index dfc5574..26e0f01 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -30,6 +30,7 @@
 #include "accessors.h"
 #include "api.h"
 #include "bootstrapper.h"
+#include "deoptimizer.h"
 #include "execution.h"
 #include "global-handles.h"
 #include "ic-inl.h"
@@ -527,6 +528,17 @@
       UNCLASSIFIED,
       51,
       "Code::MakeCodeYoung");
+
+  // Add a small set of deopt entry addresses to the encoder without
+  // generating the deopt table code, which isn't possible at deserialization
+  // time.
+  HandleScope scope(Isolate::Current());
+  for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) {
+    Address address = Deoptimizer::GetDeoptimizationEntry(
+        entry,
+        Deoptimizer::LAZY,
+        Deoptimizer::CALCULATE_ENTRY_ADDRESS);
+    Add(address, LAZY_DEOPTIMIZATION, 52 + entry, "lazy_deopt");
+  }
 }
 
 
diff --git a/src/serialize.h b/src/serialize.h
index 2041792..4bbde5a 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -47,10 +47,11 @@
   EXTENSION,
   ACCESSOR,
   RUNTIME_ENTRY,
-  STUB_CACHE_TABLE
+  STUB_CACHE_TABLE,
+  LAZY_DEOPTIMIZATION
 };
 
-const int kTypeCodeCount = STUB_CACHE_TABLE + 1;
+const int kTypeCodeCount = LAZY_DEOPTIMIZATION + 1;
 const int kFirstTypeCode = UNCLASSIFIED;
 
 const int kReferenceIdBits = 16;
@@ -59,6 +60,7 @@
 const int kDebugRegisterBits = 4;
 const int kDebugIdShift = kDebugRegisterBits;
 
+const int kDeoptTableSerializeEntryCount = 8;
 
 // ExternalReferenceTable is a helper class that defines the relationship
 // between external references and their encodings. It is used to build
diff --git a/src/smart-pointers.h b/src/smart-pointers.h
index 345c4d4..02025bb 100644
--- a/src/smart-pointers.h
+++ b/src/smart-pointers.h
@@ -58,11 +58,16 @@
   // You can get the underlying pointer out with the * operator.
   inline T* operator*() { return p_; }
 
-  // You can use [n] to index as if it was a plain pointer
+  // You can use [n] to index as if it was a plain pointer.
   inline T& operator[](size_t i) {
     return p_[i];
   }
 
+  // You can use [n] to index as if it was a plain pointer.
+  const inline T& operator[](size_t i) const {
+    return p_[i];
+  }
+
   // We don't have implicit conversion to a T* since that hinders migration:
   // You would not be able to change a method from returning a T* to
  // returning a SmartArrayPointer<T> and then get errors wherever it is used.
@@ -77,6 +82,11 @@
     return temp;
   }
 
+  inline void Reset(T* new_value) {
+    if (p_) Deallocator::Delete(p_);
+    p_ = new_value;
+  }
+
   // Assignment requires an empty (NULL) SmartArrayPointer as the receiver. Like
   // the copy constructor it removes the pointer in the original to avoid
   // double freeing.
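
The smart-pointers.h hunk adds a const operator[] overload and a Reset() that releases the currently owned array before adopting a new one. The ownership rules in miniature (a hypothetical scoped-array class, not V8's SmartArrayPointer, whose deallocation is pluggable via a Deallocator policy):

    // Minimal scoped array owner illustrating the Reset() semantics.
    #include <cstddef>

    template <typename T>
    class ScopedArray {
     public:
      explicit ScopedArray(T* p = NULL) : p_(p) {}
      ~ScopedArray() { delete[] p_; }

      T& operator[](size_t i) { return p_[i]; }
      const T& operator[](size_t i) const { return p_[i]; }  // New overload.

      // Free the owned array (if any), then take ownership of new_value.
      void Reset(T* new_value) {
        if (p_ != NULL) delete[] p_;
        p_ = new_value;
      }

     private:
      T* p_;
      ScopedArray(const ScopedArray&);     // Not copyable.
      void operator=(const ScopedArray&);  // Not assignable.
    };

    // The const overload is what lets code like this compile:
    static int Sum(const ScopedArray<int>& a, size_t n) {
      int sum = 0;
      for (size_t i = 0; i < n; i++) sum += a[i];
      return sum;
    }
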
diff --git a/src/spaces.cc b/src/spaces.cc
index cacd969..35619d1 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1680,6 +1680,7 @@
       CASE(FUNCTION);
       CASE(OPTIMIZED_FUNCTION);
       CASE(STUB);
+      CASE(COMPILED_STUB);
       CASE(BUILTIN);
       CASE(LOAD_IC);
       CASE(KEYED_LOAD_IC);
@@ -1929,52 +1930,49 @@
 }
 
 
-FreeList::FreeList(PagedSpace* owner)
-    : owner_(owner), heap_(owner->heap()) {
-  Reset();
-}
-
-
-void FreeList::Reset() {
+void FreeListCategory::Reset() {
+  top_ = NULL;
+  end_ = NULL;
   available_ = 0;
-  small_list_ = NULL;
-  medium_list_ = NULL;
-  large_list_ = NULL;
-  huge_list_ = NULL;
 }
 
 
-int FreeList::Free(Address start, int size_in_bytes) {
-  if (size_in_bytes == 0) return 0;
-  FreeListNode* node = FreeListNode::FromAddress(start);
-  node->set_size(heap_, size_in_bytes);
-
-  // Early return to drop too-small blocks on the floor.
-  if (size_in_bytes < kSmallListMin) return size_in_bytes;
-
-  // Insert other blocks at the head of a free list of the appropriate
-  // magnitude.
-  if (size_in_bytes <= kSmallListMax) {
-    node->set_next(small_list_);
-    small_list_ = node;
-  } else if (size_in_bytes <= kMediumListMax) {
-    node->set_next(medium_list_);
-    medium_list_ = node;
-  } else if (size_in_bytes <= kLargeListMax) {
-    node->set_next(large_list_);
-    large_list_ = node;
-  } else {
-    node->set_next(huge_list_);
-    huge_list_ = node;
+intptr_t FreeListCategory::CountFreeListItemsInList(Page* p) {
+  int sum = 0;
+  FreeListNode* n = top_;
+  while (n != NULL) {
+    if (Page::FromAddress(n->address()) == p) {
+      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
+      sum += free_space->Size();
+    }
+    n = n->next();
   }
-  available_ += size_in_bytes;
-  ASSERT(IsVeryLong() || available_ == SumFreeLists());
-  return 0;
+  return sum;
 }
 
 
-FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
-  FreeListNode* node = *list;
+intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
+  int sum = 0;
+  FreeListNode** n = &top_;
+  while (*n != NULL) {
+    if (Page::FromAddress((*n)->address()) == p) {
+      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
+      sum += free_space->Size();
+      *n = (*n)->next();
+    } else {
+      n = (*n)->next_address();
+    }
+  }
+  if (top_ == NULL) {
+    end_ = NULL;
+  }
+  available_ -= sum;
+  return sum;
+}
+
+
+FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
+  FreeListNode* node = top_;
 
   if (node == NULL) return NULL;
 
@@ -1985,46 +1983,119 @@
   }
 
   if (node != NULL) {
+    set_top(node->next());
     *node_size = node->Size();
-    *list = node->next();
+    available_ -= *node_size;
   } else {
-    *list = NULL;
+    set_top(NULL);
+  }
+
+  if (top() == NULL) {
+    set_end(NULL);
   }
 
   return node;
 }
 
 
+void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
+  node->set_next(top_);
+  top_ = node;
+  if (end_ == NULL) {
+    end_ = node;
+  }
+  available_ += size_in_bytes;
+}
+
+
+void FreeListCategory::RepairFreeList(Heap* heap) {
+  FreeListNode* n = top_;
+  while (n != NULL) {
+    Map** map_location = reinterpret_cast<Map**>(n->address());
+    if (*map_location == NULL) {
+      *map_location = heap->free_space_map();
+    } else {
+      ASSERT(*map_location == heap->free_space_map());
+    }
+    n = n->next();
+  }
+}
+
+
+FreeList::FreeList(PagedSpace* owner)
+    : owner_(owner), heap_(owner->heap()) {
+  Reset();
+}
+
+
+void FreeList::Reset() {
+  small_list_.Reset();
+  medium_list_.Reset();
+  large_list_.Reset();
+  huge_list_.Reset();
+}
+
+
+int FreeList::Free(Address start, int size_in_bytes) {
+  if (size_in_bytes == 0) return 0;
+
+  FreeListNode* node = FreeListNode::FromAddress(start);
+  node->set_size(heap_, size_in_bytes);
+
+  // Early return to drop too-small blocks on the floor.
+  if (size_in_bytes < kSmallListMin) return size_in_bytes;
+
+  // Insert other blocks at the head of a free list of the appropriate
+  // magnitude.
+  if (size_in_bytes <= kSmallListMax) {
+    small_list_.Free(node, size_in_bytes);
+  } else if (size_in_bytes <= kMediumListMax) {
+    medium_list_.Free(node, size_in_bytes);
+  } else if (size_in_bytes <= kLargeListMax) {
+    large_list_.Free(node, size_in_bytes);
+  } else {
+    huge_list_.Free(node, size_in_bytes);
+  }
+
+  ASSERT(IsVeryLong() || available() == SumFreeLists());
+  return 0;
+}
+
+
 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
   FreeListNode* node = NULL;
 
   if (size_in_bytes <= kSmallAllocationMax) {
-    node = PickNodeFromList(&small_list_, node_size);
+    node = small_list_.PickNodeFromList(node_size);
     if (node != NULL) return node;
   }
 
   if (size_in_bytes <= kMediumAllocationMax) {
-    node = PickNodeFromList(&medium_list_, node_size);
+    node = medium_list_.PickNodeFromList(node_size);
     if (node != NULL) return node;
   }
 
   if (size_in_bytes <= kLargeAllocationMax) {
-    node = PickNodeFromList(&large_list_, node_size);
+    node = large_list_.PickNodeFromList(node_size);
     if (node != NULL) return node;
   }
 
-  for (FreeListNode** cur = &huge_list_;
+  int huge_list_available = huge_list_.available();
+  for (FreeListNode** cur = huge_list_.GetTopAddress();
        *cur != NULL;
        cur = (*cur)->next_address()) {
     FreeListNode* cur_node = *cur;
     while (cur_node != NULL &&
            Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
-      available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
+      huge_list_available -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
       cur_node = cur_node->next();
     }
 
     *cur = cur_node;
-    if (cur_node == NULL) break;
+    if (cur_node == NULL) {
+      huge_list_.set_end(NULL);
+      break;
+    }
 
     ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
     FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
@@ -2032,12 +2103,20 @@
     if (size >= size_in_bytes) {
       // Large enough node found.  Unlink it from the list.
       node = *cur;
-      *node_size = size;
       *cur = node->next();
+      *node_size = size;
+      huge_list_available -= size;
       break;
     }
   }
 
+  if (huge_list_.top() == NULL) {
+    huge_list_.set_end(NULL);
+  }
+
+  huge_list_.set_available(huge_list_available);
+  ASSERT(IsVeryLong() || available() == SumFreeLists());
+
   return node;
 }
 
@@ -2057,8 +2136,6 @@
   FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
   if (new_node == NULL) return NULL;
 
-  available_ -= new_node_size;
-  ASSERT(IsVeryLong() || available_ == SumFreeLists());
 
   int bytes_left = new_node_size - size_in_bytes;
   ASSERT(bytes_left >= 0);
@@ -2116,25 +2193,12 @@
 }
 
 
-static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
-  intptr_t sum = 0;
-  while (n != NULL) {
-    if (Page::FromAddress(n->address()) == p) {
-      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
-      sum += free_space->Size();
-    }
-    n = n->next();
-  }
-  return sum;
-}
-
-
 void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
-  sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
+  sizes->huge_size_ = huge_list_.CountFreeListItemsInList(p);
   if (sizes->huge_size_ < p->area_size()) {
-    sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
-    sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
-    sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
+    sizes->small_size_ = small_list_.CountFreeListItemsInList(p);
+    sizes->medium_size_ = medium_list_.CountFreeListItemsInList(p);
+    sizes->large_size_ = large_list_.CountFreeListItemsInList(p);
   } else {
     sizes->small_size_ = 0;
     sizes->medium_size_ = 0;
@@ -2143,39 +2207,31 @@
 }
 
 
-static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
-  intptr_t sum = 0;
-  while (*n != NULL) {
-    if (Page::FromAddress((*n)->address()) == p) {
-      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
-      sum += free_space->Size();
-      *n = (*n)->next();
-    } else {
-      n = (*n)->next_address();
-    }
+intptr_t FreeList::EvictFreeListItems(Page* p) {
+  intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
+
+  if (sum < p->area_size()) {
+    sum += small_list_.EvictFreeListItemsInList(p) +
+        medium_list_.EvictFreeListItemsInList(p) +
+        large_list_.EvictFreeListItemsInList(p);
   }
+
   return sum;
 }
 
 
-intptr_t FreeList::EvictFreeListItems(Page* p) {
-  intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
-
-  if (sum < p->area_size()) {
-    sum += EvictFreeListItemsInList(&small_list_, p) +
-        EvictFreeListItemsInList(&medium_list_, p) +
-        EvictFreeListItemsInList(&large_list_, p);
-  }
-
-  available_ -= static_cast<int>(sum);
-
-  return sum;
+void FreeList::RepairLists(Heap* heap) {
+  small_list_.RepairFreeList(heap);
+  medium_list_.RepairFreeList(heap);
+  large_list_.RepairFreeList(heap);
+  huge_list_.RepairFreeList(heap);
 }
 
 
 #ifdef DEBUG
-intptr_t FreeList::SumFreeList(FreeListNode* cur) {
+intptr_t FreeListCategory::SumFreeList() {
   intptr_t sum = 0;
+  FreeListNode* cur = top_;
   while (cur != NULL) {
     ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
     FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
@@ -2189,8 +2245,9 @@
 static const int kVeryLongFreeList = 500;
 
 
-int FreeList::FreeListLength(FreeListNode* cur) {
+int FreeListCategory::FreeListLength() {
   int length = 0;
+  FreeListNode* cur = top_;
   while (cur != NULL) {
     length++;
     cur = cur->next();
@@ -2201,10 +2258,10 @@
 
 
 bool FreeList::IsVeryLong() {
-  if (FreeListLength(small_list_) == kVeryLongFreeList) return  true;
-  if (FreeListLength(medium_list_) == kVeryLongFreeList) return  true;
-  if (FreeListLength(large_list_) == kVeryLongFreeList) return  true;
-  if (FreeListLength(huge_list_) == kVeryLongFreeList) return  true;
+  if (small_list_.FreeListLength() == kVeryLongFreeList) return  true;
+  if (medium_list_.FreeListLength() == kVeryLongFreeList) return  true;
+  if (large_list_.FreeListLength() == kVeryLongFreeList) return  true;
+  if (huge_list_.FreeListLength() == kVeryLongFreeList) return  true;
   return false;
 }
 
@@ -2213,10 +2270,10 @@
 // on the free list, so it should not be called if FreeListLength returns
 // kVeryLongFreeList.
 intptr_t FreeList::SumFreeLists() {
-  intptr_t sum = SumFreeList(small_list_);
-  sum += SumFreeList(medium_list_);
-  sum += SumFreeList(large_list_);
-  sum += SumFreeList(huge_list_);
+  intptr_t sum = small_list_.SumFreeList();
+  sum += medium_list_.SumFreeList();
+  sum += large_list_.SumFreeList();
+  sum += huge_list_.SumFreeList();
   return sum;
 }
 #endif
@@ -2304,27 +2361,6 @@
 }
 
 
-static void RepairFreeList(Heap* heap, FreeListNode* n) {
-  while (n != NULL) {
-    Map** map_location = reinterpret_cast<Map**>(n->address());
-    if (*map_location == NULL) {
-      *map_location = heap->free_space_map();
-    } else {
-      ASSERT(*map_location == heap->free_space_map());
-    }
-    n = n->next();
-  }
-}
-
-
-void FreeList::RepairLists(Heap* heap) {
-  RepairFreeList(heap, small_list_);
-  RepairFreeList(heap, medium_list_);
-  RepairFreeList(heap, large_list_);
-  RepairFreeList(heap, huge_list_);
-}
-
-
 // After we have booted, we have created a map which represents free space
 // on the heap.  If there was already a free list then the elements on it
 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
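
The spaces.cc refactoring replaces the four raw FreeListNode* heads plus one shared available_ counter with a FreeListCategory per size class, each owning its top, end, and byte count, so the per-list helpers (freeing, picking, eviction, repair) become methods. The core list discipline, stripped to its essentials (a hypothetical Node struct; in V8 the size and next link live inside the free heap object itself):

    // Stripped-down free list category, for illustration only.
    #include <cstddef>

    struct Node {
      Node* next;
      int size;
    };

    class Category {
     public:
      Category() : top_(NULL), end_(NULL), available_(0) {}

      // Push a block on the head; track the tail for later use.
      void Free(Node* node, int size_in_bytes) {
        node->size = size_in_bytes;
        node->next = top_;
        top_ = node;
        if (end_ == NULL) end_ = node;
        available_ += size_in_bytes;
      }

      // Pop the head. The category now keeps available_ consistent itself,
      // instead of the owning free list adjusting one shared counter.
      Node* Pick(int* node_size) {
        Node* node = top_;
        if (node == NULL) return NULL;
        top_ = node->next;
        if (top_ == NULL) end_ = NULL;
        *node_size = node->size;
        available_ -= node->size;
        return node;
      }

      int available() const { return available_; }

     private:
      Node* top_;
      Node* end_;
      int available_;  // Total bytes across all blocks in this category.
    };
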
diff --git a/src/spaces.h b/src/spaces.h
index 56f629e..bb7adf7 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -1381,6 +1381,50 @@
 };
 
 
+// The free list category holds a pointer to the top element and a pointer to
+// the end element of the linked list of free memory blocks.
+class FreeListCategory {
+ public:
+  FreeListCategory() : top_(NULL), end_(NULL), available_(0) {}
+
+  void Reset();
+
+  void Free(FreeListNode* node, int size_in_bytes);
+
+  FreeListNode* PickNodeFromList(int *node_size);
+
+  intptr_t CountFreeListItemsInList(Page* p);
+
+  intptr_t EvictFreeListItemsInList(Page* p);
+
+  void RepairFreeList(Heap* heap);
+
+  FreeListNode** GetTopAddress() { return &top_; }
+  FreeListNode* top() const { return top_; }
+  void set_top(FreeListNode* top) { top_ = top; }
+
+  FreeListNode** GetEndAddress() { return &end_; }
+  FreeListNode* end() const { return end_; }
+  void set_end(FreeListNode* end) { end_ = end; }
+
+  int* GetAvailableAddress() { return &available_; }
+  int available() const { return available_; }
+  void set_available(int available) { available_ = available; }
+
+#ifdef DEBUG
+  intptr_t SumFreeList();
+  int FreeListLength();
+#endif
+
+ private:
+  FreeListNode* top_;
+  FreeListNode* end_;
+
+  // Total available bytes in all blocks of this free list category.
+  int available_;
+};
+
+
 // The free list for the old space.  The free list is organized in such a way
 // as to encourage objects allocated around the same time to be near each
 // other.  The normal way to allocate is intended to be by bumping a 'top'
@@ -1412,7 +1456,10 @@
   void Reset();
 
   // Return the number of bytes available on the free list.
-  intptr_t available() { return available_; }
+  intptr_t available() {
+    return small_list_.available() + medium_list_.available() +
+           large_list_.available() + huge_list_.available();
+  }
 
   // Place a node on the free list.  The block of size 'size_in_bytes'
   // starting at 'start' is placed on the free list.  The return value is the
@@ -1430,8 +1477,6 @@
 
 #ifdef DEBUG
   void Zap();
-  static intptr_t SumFreeList(FreeListNode* node);
-  static int FreeListLength(FreeListNode* cur);
   intptr_t SumFreeLists();
   bool IsVeryLong();
 #endif
@@ -1459,16 +1504,11 @@
   static const int kMinBlockSize = 3 * kPointerSize;
   static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
 
-  FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
-
   FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
 
   PagedSpace* owner_;
   Heap* heap_;
 
-  // Total available bytes in all blocks on this free list.
-  int available_;
-
   static const int kSmallListMin = 0x20 * kPointerSize;
   static const int kSmallListMax = 0xff * kPointerSize;
   static const int kMediumListMax = 0x7ff * kPointerSize;
@@ -1476,10 +1516,10 @@
   static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
   static const int kMediumAllocationMax = kSmallListMax;
   static const int kLargeAllocationMax = kMediumListMax;
-  FreeListNode* small_list_;
-  FreeListNode* medium_list_;
-  FreeListNode* large_list_;
-  FreeListNode* huge_list_;
+  FreeListCategory small_list_;
+  FreeListCategory medium_list_;
+  FreeListCategory large_list_;
+  FreeListCategory huge_list_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
 };
diff --git a/src/store-buffer.cc b/src/store-buffer.cc
index 66488ae..8a69164 100644
--- a/src/store-buffer.cc
+++ b/src/store-buffer.cc
@@ -687,10 +687,15 @@
     uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
     // Shift out the last bits including any tags.
     int_addr >>= kPointerSizeLog2;
-    int hash1 =
-        ((int_addr ^ (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1));
+    // The upper part of an address is basically random because of ASLR and OS
+    // non-determinism, so we use only the bits within a page for hashing to
+    // make v8's behavior (more) deterministic.
+    uintptr_t hash_addr =
+        int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
+    int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
+                 (kHashSetLength - 1));
     if (hash_set_1_[hash1] == int_addr) continue;
-    uintptr_t hash2 = (int_addr - (int_addr >> kHashSetLengthLog2));
+    uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
     hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
     hash2 &= (kHashSetLength - 1);
     if (hash_set_2_[hash2] == int_addr) continue;
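
The store-buffer change masks each address with the page alignment mask before hashing, so only within-page bits feed the hash; the high bits vary from run to run under ASLR, which made the old hash (and thus GC behavior) nondeterministic. The masked hash in isolation (constants are examples; V8 derives them from its page layout):

    // Sketch of the page-masked hash; constants are illustrative.
    #include <stdint.h>

    static const int kPointerSizeLog2 = 3;  // 64-bit pointers.
    static const uintptr_t kPageAlignmentMask =
        (static_cast<uintptr_t>(1) << 20) - 1;
    static const int kHashSetLengthLog2 = 10;
    static const int kHashSetLength = 1 << kHashSetLengthLog2;

    int Hash1(uintptr_t address) {
      uintptr_t int_addr = address >> kPointerSizeLog2;  // Shift out tags.
      // Keep only within-page bits; the rest is ASLR noise.
      uintptr_t hash_addr =
          int_addr & (kPageAlignmentMask >> kPointerSizeLog2);
      return static_cast<int>(
          (hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
          (kHashSetLength - 1));
    }
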
diff --git a/src/string.js b/src/string.js
index badfad3..d9ca035 100644
--- a/src/string.js
+++ b/src/string.js
@@ -824,6 +824,7 @@
   for (i = 0; i < n; i++) {
     var code = %_Arguments(i);
     if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
+    if (code < 0) code = code & 0xffff;
     if (code > 0x7f) break;
     %_OneByteSeqStringSetChar(one_byte, i, code);
   }
diff --git a/src/stub-cache.h b/src/stub-cache.h
index f858e47..c562bc7 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -681,13 +681,6 @@
   Handle<Code> CompileLoadPolymorphic(MapHandleList* receiver_maps,
                                       CodeHandleList* handler_ics);
 
-  static void GenerateLoadExternalArray(MacroAssembler* masm,
-                                        ElementsKind elements_kind);
-
-  static void GenerateLoadFastElement(MacroAssembler* masm);
-
-  static void GenerateLoadFastDoubleElement(MacroAssembler* masm);
-
   static void GenerateLoadDictionaryElement(MacroAssembler* masm);
 
  private:
diff --git a/src/unicode-inl.h b/src/unicode-inl.h
index ec9c69f..8ceefe8 100644
--- a/src/unicode-inl.h
+++ b/src/unicode-inl.h
@@ -240,10 +240,51 @@
   buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_);
 }
 
-template <unsigned s>
-Utf8InputBuffer<s>::Utf8InputBuffer(const char* data, unsigned length)
-    : InputBuffer<Utf8, Buffer<const char*>, s>(Buffer<const char*>(data,
-                                                                    length)) {
+Utf8DecoderBase::Utf8DecoderBase()
+  : unbuffered_start_(NULL),
+    utf16_length_(0),
+    last_byte_of_buffer_unused_(false) {}
+
+Utf8DecoderBase::Utf8DecoderBase(uint16_t* buffer,
+                                 unsigned buffer_length,
+                                 const uint8_t* stream,
+                                 unsigned stream_length) {
+  Reset(buffer, buffer_length, stream, stream_length);
+}
+
+template<unsigned kBufferSize>
+Utf8Decoder<kBufferSize>::Utf8Decoder(const char* stream, unsigned length)
+  : Utf8DecoderBase(buffer_,
+                    kBufferSize,
+                    reinterpret_cast<const uint8_t*>(stream),
+                    length) {
+}
+
+template<unsigned kBufferSize>
+void Utf8Decoder<kBufferSize>::Reset(const char* stream, unsigned length) {
+  Utf8DecoderBase::Reset(buffer_,
+                         kBufferSize,
+                         reinterpret_cast<const uint8_t*>(stream),
+                         length);
+}
+
+template <unsigned kBufferSize>
+unsigned Utf8Decoder<kBufferSize>::WriteUtf16(uint16_t* data,
+                                              unsigned length) const {
+  ASSERT(length > 0);
+  if (length > utf16_length_) length = utf16_length_;
+  // memcpy everything in buffer.
+  unsigned buffer_length =
+      last_byte_of_buffer_unused_ ? kBufferSize - 1 : kBufferSize;
+  unsigned memcpy_length = length <= buffer_length ? length : buffer_length;
+  memcpy(data, buffer_, memcpy_length*sizeof(uint16_t));
+  if (length <= buffer_length) return length;
+  ASSERT(unbuffered_start_ != NULL);
+  // Copy the rest the slow way.
+  WriteUtf16Slow(unbuffered_start_,
+                 data + buffer_length,
+                 length - buffer_length);
+  return length;
 }
 
 }  // namespace unibrow
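
WriteUtf16() above memcpy's the prefix cached in buffer_ (minus the final slot when a surrogate pair didn't fit) and re-decodes only the tail of the raw stream via WriteUtf16Slow(). Going by the interface in this patch alone, usage looks roughly like this (a hedged sketch; include paths and error handling elided):

    // Hypothetical usage of the new Utf8Decoder API; not code from the patch.
    void ConvertExample() {
      const char* utf8 = "caf\xc3\xa9";                // 5 bytes of UTF-8.
      unibrow::Utf8Decoder<512> decoder(utf8, 5);
      unsigned utf16_length = decoder.Utf16Length();   // 4 code units here.
      uint16_t* out = new uint16_t[utf16_length];
      decoder.WriteUtf16(out, utf16_length);  // Cached prefix + slow tail.
      delete[] out;
    }
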
diff --git a/src/unicode.cc b/src/unicode.cc
index 14f3806..3788dd6 100644
--- a/src/unicode.cc
+++ b/src/unicode.cc
@@ -277,58 +277,6 @@
 }
 
 
-const byte* Utf8::ReadBlock(Buffer<const char*> str, byte* buffer,
-    unsigned capacity, unsigned* chars_read_ptr, unsigned* offset_ptr) {
-  unsigned offset = *offset_ptr;
-  // Bail out early if we've reached the end of the string.
-  if (offset == str.length()) {
-    *chars_read_ptr = 0;
-    return NULL;
-  }
-  const byte* data = reinterpret_cast<const byte*>(str.data());
-  if (data[offset] <= kMaxOneByteChar) {
-    // The next character is an ASCII char so we scan forward over
-    // the following ASCII characters and return the next pure ASCII
-    // substring
-    const byte* result = data + offset;
-    offset++;
-    while ((offset < str.length()) && (data[offset] <= kMaxOneByteChar))
-      offset++;
-    *chars_read_ptr = offset - *offset_ptr;
-    *offset_ptr = offset;
-    return result;
-  } else {
-    // The next character is non-ASCII so we just fill the buffer
-    unsigned cursor = 0;
-    unsigned chars_read = 0;
-    while (offset < str.length()) {
-      uchar c = data[offset];
-      if (c <= kMaxOneByteChar) {
-        // Fast case for ASCII characters
-        if (!CharacterStream::EncodeAsciiCharacter(c,
-                                                   buffer,
-                                                   capacity,
-                                                   cursor))
-          break;
-        offset += 1;
-      } else {
-        unsigned chars = 0;
-        c = Utf8::ValueOf(data + offset, str.length() - offset, &chars);
-        if (!CharacterStream::EncodeNonAsciiCharacter(c,
-                                                      buffer,
-                                                      capacity,
-                                                      cursor))
-          break;
-        offset += chars;
-      }
-      chars_read++;
-    }
-    *offset_ptr = offset;
-    *chars_read_ptr = chars_read;
-    return buffer;
-  }
-}
-
 unsigned CharacterStream::Length() {
   unsigned result = 0;
   while (has_more()) {
@@ -356,6 +304,75 @@
   }
 }
 
+void Utf8DecoderBase::Reset(uint16_t* buffer,
+                            unsigned buffer_length,
+                            const uint8_t* stream,
+                            unsigned stream_length) {
+  // Assume everything will fit in the buffer and stream won't be needed.
+  last_byte_of_buffer_unused_ = false;
+  unbuffered_start_ = NULL;
+  bool writing_to_buffer = true;
+  // Loop until stream is read, writing to buffer as long as buffer has space.
+  unsigned utf16_length = 0;
+  while (stream_length != 0) {
+    unsigned cursor = 0;
+    uint32_t character = Utf8::ValueOf(stream, stream_length, &cursor);
+    ASSERT(cursor > 0 && cursor <= stream_length);
+    stream += cursor;
+    stream_length -= cursor;
+    bool is_two_characters = character > Utf16::kMaxNonSurrogateCharCode;
+    utf16_length += is_two_characters ? 2 : 1;
+    // Don't need to write to the buffer, but still need utf16_length.
+    if (!writing_to_buffer) continue;
+    // Write out the characters to the buffer.
+    // Must check for equality with buffer_length as we've already updated it.
+    if (utf16_length <= buffer_length) {
+      if (is_two_characters) {
+        *buffer++ = Utf16::LeadSurrogate(character);
+        *buffer++ = Utf16::TrailSurrogate(character);
+      } else {
+        *buffer++ = character;
+      }
+      if (utf16_length == buffer_length) {
+        // Just wrote the last character of the buffer.
+        writing_to_buffer = false;
+        unbuffered_start_ = stream;
+      }
+      continue;
+    }
+    // We have run past the end of the buffer. The last buffer slot is left
+    // unused; back up so the surrogate pair is re-decoded by the slow path.
+    ASSERT(is_two_characters);
+    writing_to_buffer = false;
+    last_byte_of_buffer_unused_ = true;
+    unbuffered_start_ = stream - cursor;
+  }
+  utf16_length_ = utf16_length;
+}
+
+
+void Utf8DecoderBase::WriteUtf16Slow(const uint8_t* stream,
+                                     uint16_t* data,
+                                     unsigned data_length) {
+  while (data_length != 0) {
+    unsigned cursor = 0;
+    uint32_t character = Utf8::ValueOf(stream, Utf8::kMaxEncodedSize, &cursor);
+    // No bounds checking on stream is needed here; it was already done in
+    // Reset.
+    stream += cursor;
+    if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+      *data++ = Utf16::LeadSurrogate(character);
+      *data++ = Utf16::TrailSurrogate(character);
+      ASSERT(data_length > 1);
+      data_length -= 2;
+    } else {
+      *data++ = character;
+      data_length -= 1;
+    }
+  }
+}
+
+
 // Uppercase:            point.category == 'Lu'
 
 static const uint16_t kUppercaseTable0Size = 450;
diff --git a/src/unicode.h b/src/unicode.h
index 91b16c9..a9b27aa 100644
--- a/src/unicode.h
+++ b/src/unicode.h
@@ -29,7 +29,7 @@
 #define V8_UNICODE_H_
 
 #include <sys/types.h>
-
+#include <globals.h>
 /**
  * \file
  * Definitions and convenience functions for working with unicode.
@@ -140,10 +140,10 @@
  // One UTF-16 surrogate is encoded (illegally) as 3 UTF-8 bytes.
   // The illegality stems from the surrogate not being part of a pair.
   static const int kUtf8BytesToCodeASurrogate = 3;
-  static inline uchar LeadSurrogate(int char_code) {
+  static inline uint16_t LeadSurrogate(uint32_t char_code) {
     return 0xd800 + (((char_code - 0x10000) >> 10) & 0x3ff);
   }
-  static inline uchar TrailSurrogate(int char_code) {
+  static inline uint16_t TrailSurrogate(uint32_t char_code) {
     return 0xdc00 + (char_code & 0x3ff);
   }
 };
@@ -154,8 +154,6 @@
   static inline uchar Length(uchar chr, int previous);
   static inline unsigned Encode(
       char* out, uchar c, int previous);
-  static const byte* ReadBlock(Buffer<const char*> str, byte* buffer,
-      unsigned capacity, unsigned* chars_read, unsigned* offset);
   static uchar CalculateValue(const byte* str,
                               unsigned length,
                               unsigned* cursor);
@@ -170,10 +168,6 @@
   // that match are coded as a 4 byte UTF-8 sequence.
   static const unsigned kBytesSavedByCombiningSurrogates = 2;
   static const unsigned kSizeOfUnmatchedSurrogate = 3;
-
- private:
-  template <unsigned s> friend class Utf8InputBuffer;
-  friend class Test;
   static inline uchar ValueOf(const byte* str,
                               unsigned length,
                               unsigned* cursor);
@@ -245,17 +239,42 @@
   byte util_buffer_[kSize];
 };
 
-// --- U t f 8   I n p u t   B u f f e r ---
 
-template <unsigned s = 256>
-class Utf8InputBuffer : public InputBuffer<Utf8, Buffer<const char*>, s> {
+class Utf8DecoderBase {
  public:
-  inline Utf8InputBuffer() { }
-  inline Utf8InputBuffer(const char* data, unsigned length);
-  inline void Reset(const char* data, unsigned length) {
-    InputBuffer<Utf8, Buffer<const char*>, s>::Reset(
-        Buffer<const char*>(data, length));
-  }
+  // Initialization done in subclass.
+  inline Utf8DecoderBase();
+  inline Utf8DecoderBase(uint16_t* buffer,
+                         unsigned buffer_length,
+                         const uint8_t* stream,
+                         unsigned stream_length);
+  inline unsigned Utf16Length() const { return utf16_length_; }
+ protected:
+  // This reads all characters and sets the utf16_length_.
+  // The first buffer_length utf16 chars are cached in the buffer.
+  void Reset(uint16_t* buffer,
+             unsigned buffer_length,
+             const uint8_t* stream,
+             unsigned stream_length);
+  static void WriteUtf16Slow(const uint8_t* stream,
+                             uint16_t* data,
+                             unsigned length);
+  const uint8_t* unbuffered_start_;
+  unsigned utf16_length_;
+  bool last_byte_of_buffer_unused_;
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Utf8DecoderBase);
+};
+
+template <unsigned kBufferSize>
+class Utf8Decoder : public Utf8DecoderBase {
+ public:
+  inline Utf8Decoder() {}
+  inline Utf8Decoder(const char* stream, unsigned length);
+  inline void Reset(const char* stream, unsigned length);
+  inline unsigned WriteUtf16(uint16_t* data, unsigned length) const;
+ private:
+  uint16_t buffer_[kBufferSize];
 };
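
LeadSurrogate()/TrailSurrogate() above are tightened from uchar(int) to uint16_t(uint32_t), matching what they actually compute: the two UTF-16 code units of a supplementary-plane code point. A small self-check of the formulas (self-contained; mirrors the arithmetic above):

    // Round-trip check of the surrogate formulas.
    #include <assert.h>
    #include <stdint.h>

    static uint16_t Lead(uint32_t c) {
      return 0xd800 + (((c - 0x10000) >> 10) & 0x3ff);
    }
    static uint16_t Trail(uint32_t c) { return 0xdc00 + (c & 0x3ff); }

    int main() {
      uint32_t c = 0x10302;        // A supplementary-plane code point.
      uint16_t lead = Lead(c);     // 0xd800 for this code point.
      uint16_t trail = Trail(c);   // 0xdf02.
      // Recombine 10 bits from each half, rebased at 0x10000.
      uint32_t back = 0x10000 +
          ((static_cast<uint32_t>(lead - 0xd800) << 10) | (trail - 0xdc00));
      assert(back == c);
      return 0;
    }
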
 
 
diff --git a/src/utils.h b/src/utils.h
index e03f96f..d964983 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -522,6 +522,8 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedVector);
 };
 
+#define STATIC_ASCII_VECTOR(x)                        \
+  v8::internal::Vector<const char>(x, ARRAY_SIZE(x)-1)
 
 inline Vector<const char> CStrVector(const char* data) {
   return Vector<const char>(data, StrLength(data));
@@ -1015,6 +1017,7 @@
   static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
   static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
   static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
+  static BailoutId StubEntry() { return BailoutId(kStubEntryId); }
 
   bool IsNone() const { return id_ == kNoneId; }
   bool operator==(const BailoutId& other) const { return id_ == other.id_; }
@@ -1030,9 +1033,12 @@
   // code (function declarations).
   static const int kDeclarationsId = 3;
 
-  // Ever FunctionState starts with this id.
+  // Every FunctionState starts with this id.
   static const int kFirstUsableId = 4;
 
+  // Every compiled stub starts with this id.
+  static const int kStubEntryId = 5;
+
   int id_;
 };
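
The STATIC_ASCII_VECTOR macro added above only packages a string literal as a
Vector<const char>, subtracting one because ARRAY_SIZE counts the trailing
NUL. A self-contained sketch of the same arithmetic with stand-ins for the V8
types (illustration only):

    #include <cstdio>

    template <typename T>
    struct Vector {               // stand-in for v8::internal::Vector
      Vector(T* start, int length) : start_(start), length_(length) {}
      T* start_;
      int length_;
    };

    #define ARRAY_SIZE(a) (static_cast<int>(sizeof(a) / sizeof(*(a))))
    #define STATIC_ASCII_VECTOR(x) Vector<const char>(x, ARRAY_SIZE(x) - 1)

    int main() {
      Vector<const char> v = STATIC_ASCII_VECTOR("foo");
      // sizeof("foo") == 4 (it includes '\0'), so the length is 3.
      std::printf("%d\n", v.length_);
      return 0;
    }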
 
diff --git a/src/v8natives.js b/src/v8natives.js
index e8752c8..3978e88 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -893,16 +893,35 @@
     }
     // Make sure the below call to DefineObjectProperty() doesn't overwrite
     // any magic "length" property by removing the value.
+    // TODO(mstarzinger): This hack should be removed once we have addressed the
+    // respective TODO in Runtime_DefineOrRedefineDataProperty.
+    // For the time being, we need a hack to prevent Object.observe from
+    // generating two change records.
+    var isObserved = %IsObserved(obj);
+    if (isObserved) %SetIsObserved(obj, false);
     obj.length = new_length;
     desc.value_ = void 0;
     desc.hasValue_ = false;
-    if (!DefineObjectProperty(obj, "length", desc, should_throw) || threw) {
+    threw = !DefineObjectProperty(obj, "length", desc, should_throw) || threw;
+    if (isObserved) %SetIsObserved(obj, true);
+    if (threw) {
       if (should_throw) {
         throw MakeTypeError("redefine_disallowed", [p]);
       } else {
         return false;
       }
     }
+    if (isObserved) {
+      var new_desc = GetOwnProperty(obj, "length");
+      var updated = length_desc.value_ !== new_desc.value_;
+      var reconfigured = length_desc.writable_ !== new_desc.writable_ ||
+          length_desc.configurable_ !== new_desc.configurable_ ||
+          length_desc.enumerable_ !== new_desc.enumerable_;
+      if (updated || reconfigured) {
+        NotifyChange(reconfigured ? "reconfigured" : "updated",
+            obj, "length", length_desc.value_);
+      }
+    }
     return true;
   }
 
diff --git a/src/version.cc b/src/version.cc
index e4b59b3..9817c30 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     15
-#define BUILD_NUMBER      11
-#define PATCH_LEVEL       2
+#define BUILD_NUMBER      12
+#define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 370cb02..cc07287 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -201,7 +201,8 @@
 // -----------------------------------------------------------------------------
 // Register constants.
 
-const int Register::kRegisterCodeByAllocationIndex[kNumAllocatableRegisters] = {
+const int
+    Register::kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters] = {
   // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15
   0, 3, 2, 1, 7, 8, 9, 11, 14, 15
 };
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 24c8df3..9471a6d 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -95,21 +95,24 @@
   //  r10 - fixed scratch register
   //  r12 - smi constant register
   //  r13 - root register
+  static const int kMaxNumAllocatableRegisters = 10;
+  static int NumAllocatableRegisters() {
+    return kMaxNumAllocatableRegisters;
+  }
   static const int kNumRegisters = 16;
-  static const int kNumAllocatableRegisters = 10;
 
   static int ToAllocationIndex(Register reg) {
     return kAllocationIndexByRegisterCode[reg.code()];
   }
 
   static Register FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
     Register result = { kRegisterCodeByAllocationIndex[index] };
     return result;
   }
 
   static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
     const char* const names[] = {
       "rax",
       "rbx",
@@ -157,7 +160,7 @@
   int code_;
 
  private:
-  static const int kRegisterCodeByAllocationIndex[kNumAllocatableRegisters];
+  static const int kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters];
   static const int kAllocationIndexByRegisterCode[kNumRegisters];
 };
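
The shape of this change recurs throughout the patch: storage stays statically
sized by the new kMaxNumAllocatableRegisters constant, while loops query
NumAllocatableRegisters() at runtime, leaving room for ports where the usable
set depends on detected CPU features. A reduced sketch of that split
(illustration only):

    #include <cstdio>

    struct Register {
      static const int kMaxNumAllocatableRegisters = 10;
      static int NumAllocatableRegisters() {
        // Constant on x64; other ports may return fewer at runtime.
        return kMaxNumAllocatableRegisters;
      }
    };

    int main() {
      // Arrays are sized for the worst case...
      const char* spills[Register::kMaxNumAllocatableRegisters] = { 0 };
      // ...but iteration stops at the dynamic count.
      for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
        spills[i] = "in use";
      }
      std::printf("%d allocatable registers\n",
                  Register::NumAllocatableRegisters());
      return 0;
    }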
 
@@ -200,7 +203,10 @@
 
 struct XMMRegister {
   static const int kNumRegisters = 16;
-  static const int kNumAllocatableRegisters = 15;
+  static const int kMaxNumAllocatableRegisters = 15;
+  static int NumAllocatableRegisters() {
+    return kMaxNumAllocatableRegisters;
+  }
 
   static int ToAllocationIndex(XMMRegister reg) {
     ASSERT(reg.code() != 0);
@@ -208,13 +214,13 @@
   }
 
   static XMMRegister FromAllocationIndex(int index) {
-    ASSERT(0 <= index && index < kNumAllocatableRegisters);
+    ASSERT(0 <= index && index < kMaxNumAllocatableRegisters);
     XMMRegister result = { index + 1 };
     return result;
   }
 
   static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
     const char* const names[] = {
       "xmm1",
       "xmm2",
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index ed0ec68..e156dfd 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -646,6 +646,25 @@
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
 
 
+void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) {
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across the notification; this is important for
+    // compiled stubs that tail call the runtime on deopts, passing their
+    // parameters in registers.
+    __ Pushad();
+    __ CallRuntime(Runtime::kNotifyICMiss, 0);
+    __ Popad();
+    // Tear down internal frame.
+  }
+
+  __ pop(MemOperand(rsp, 0));  // Ignore state offset.
+  __ ret(0);  // Return to IC Miss stub, continuation still on stack.
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   // Enter an internal frame.
@@ -660,17 +679,17 @@
   }
 
   // Get the full codegen state from the stack and untag it.
-  __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
+  __ SmiToInteger32(r10, Operand(rsp, 1 * kPointerSize));
 
   // Switch on the state.
   Label not_no_registers, not_tos_rax;
-  __ cmpq(rcx, Immediate(FullCodeGenerator::NO_REGISTERS));
+  __ cmpq(r10, Immediate(FullCodeGenerator::NO_REGISTERS));
   __ j(not_equal, &not_no_registers, Label::kNear);
   __ ret(1 * kPointerSize);  // Remove state.
 
   __ bind(&not_no_registers);
   __ movq(rax, Operand(rsp, 2 * kPointerSize));
-  __ cmpq(rcx, Immediate(FullCodeGenerator::TOS_REG));
+  __ cmpq(r10, Immediate(FullCodeGenerator::TOS_REG));
   __ j(not_equal, &not_tos_rax, Label::kNear);
   __ ret(2 * kPointerSize);  // Remove state, rax.
 
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 9705718..f950368 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -36,6 +36,18 @@
 namespace v8 {
 namespace internal {
 
+
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { rdx, rax };
+  descriptor->register_param_count_ = 2;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      isolate->builtins()->KeyedLoadIC_Miss();
+}
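
The two registers mirror the KeyedLoadIC calling convention used elsewhere in
this patch (rdx holds the receiver, rax the key; compare the state comments
in stub-cache-x64.cc below), so a deoptimizing stub can hand its parameters
straight to the miss handler. A standalone sketch of how such a descriptor is
walked (stand-in types, illustration only):

    #include <cstdio>

    struct Register { int code_; };  // stand-in: a register is just a code
    static const Register rdx = { 2 };
    static const Register rax = { 0 };

    struct CodeStubInterfaceDescriptor {
      int register_param_count_;
      const Register* register_params_;
    };

    int main() {
      static const Register params[] = { rdx, rax };  // receiver, key
      CodeStubInterfaceDescriptor d = { 2, params };
      for (int i = 0; i < d.register_param_count_; ++i) {
        std::printf("param %d -> register code %d\n",
                    i, d.register_params_[i].code_);
      }
      return 0;
    }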
+
+
 #define __ ACCESS_MASM(masm)
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index ab8ea76..71cef58 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -37,7 +37,7 @@
 
 // Compute a transcendental math function natively, or call the
 // TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
  public:
   enum ArgumentType {
     TAGGED = 0,
@@ -60,7 +60,7 @@
 };
 
 
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
  public:
   explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
       : save_doubles_(save_fp) { }
@@ -79,7 +79,7 @@
 };
 
 
-class UnaryOpStub: public CodeStub {
+class UnaryOpStub: public PlatformCodeStub {
  public:
   UnaryOpStub(Token::Value op,
               UnaryOverwriteMode mode,
@@ -216,7 +216,7 @@
 };
 
 
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
  public:
   explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
 
@@ -238,7 +238,7 @@
 };
 
 
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
  public:
   SubStringStub() {}
 
@@ -250,7 +250,7 @@
 };
 
 
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
  public:
   StringCompareStub() {}
 
@@ -287,7 +287,7 @@
 };
 
 
-class NumberToStringStub: public CodeStub {
+class NumberToStringStub: public PlatformCodeStub {
  public:
   NumberToStringStub() { }
 
@@ -316,7 +316,7 @@
 };
 
 
-class StringDictionaryLookupStub: public CodeStub {
+class StringDictionaryLookupStub: public PlatformCodeStub {
  public:
   enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
 
@@ -378,7 +378,7 @@
 };
 
 
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
  public:
   RecordWriteStub(Register object,
                   Register value,
@@ -561,7 +561,7 @@
     Register GetRegThatIsNotRcxOr(Register r1,
                                   Register r2,
                                   Register r3) {
-      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+      for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
         Register candidate = Register::FromAllocationIndex(i);
         if (candidate.is(rcx)) continue;
         if (candidate.is(r1)) continue;
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index d444095..3a7646b 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -44,6 +44,10 @@
 
 class CodeGenerator: public AstVisitor {
  public:
+  CodeGenerator() {
+    InitializeAstVisitor();
+  }
+
   static bool MakeCode(CompilationInfo* info);
 
   // Printing of AST, etc. as requested by flags.
@@ -63,6 +67,8 @@
                               int pos,
                               bool right_here = false);
 
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
  private:
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index c8fdfce..d32310a 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -46,11 +46,14 @@
 }
 
 
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
-  HandleScope scope;
+void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
+    JSFunction* function) {
+  Isolate* isolate = function->GetIsolate();
+  HandleScope scope(isolate);
   AssertNoAllocation no_allocation;
 
-  if (!function->IsOptimized()) return;
+  ASSERT(function->IsOptimized());
+  ASSERT(function->FunctionsInFunctionListShareSameCode());
 
   // The optimized code is going to be patched, so we cannot use it
   // any more.  Play safe and reset the whole cache.
@@ -91,8 +94,6 @@
 #endif
   }
 
-  Isolate* isolate = code->GetIsolate();
-
   // Add the deoptimizing code to the list.
   DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
   DeoptimizerData* data = isolate->deoptimizer_data();
@@ -199,7 +200,7 @@
 
 void Deoptimizer::DoComputeOsrOutputFrame() {
   DeoptimizationInputData* data = DeoptimizationInputData::cast(
-      optimized_code_->deoptimization_data());
+      compiled_code_->deoptimization_data());
   unsigned ast_id = data->OsrAstId()->value();
   // TODO(kasperl): This should not be the bailout_id_. It should be
   // the ast id. Confusing.
@@ -236,7 +237,7 @@
   unsigned input_frame_size = input_->GetFrameSize();
   ASSERT(fixed_size + height_in_bytes == input_frame_size);
 
-  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+  unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
   unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
   unsigned outgoing_size = outgoing_height * kPointerSize;
   unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@@ -328,7 +329,7 @@
 
     unsigned pc_offset = data->OsrPcOffset()->value();
     intptr_t pc = reinterpret_cast<intptr_t>(
-        optimized_code_->entry() + pc_offset);
+        compiled_code_->entry() + pc_offset);
     output_[0]->SetPc(pc);
   }
   Code* continuation =
@@ -447,6 +448,70 @@
 }
 
 
+void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
+                                      int frame_index) {
+  //
+  //               FROM                                  TO             <-rbp
+  //    |          ....           |          |          ....           |
+  //    +-------------------------+          +-------------------------+
+  //    | JSFunction continuation |          | JSFunction continuation |
+  //    +-------------------------+          +-------------------------+<-rsp
+  // |  |   saved frame (rbp)     |
+  // |  +=========================+<-rbp
+  // |  |   JSFunction context    |
+  // v  +-------------------------+
+  //    |   COMPILED_STUB marker  |          rbp = saved frame
+  //    +-------------------------+          rsi = JSFunction context
+  //    |                         |
+  //    | ...                     |
+  //    |                         |
+  //    +-------------------------+<-rsp
+  //
+  //
+  int output_frame_size = 1 * kPointerSize;
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, 0);
+  Code* notify_miss =
+      isolate_->builtins()->builtin(Builtins::kNotifyICMiss);
+  output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+  output_frame->SetContinuation(
+      reinterpret_cast<intptr_t>(notify_miss->entry()));
+
+  ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
+  int major_key = compiled_code_->major_key();
+  CodeStubInterfaceDescriptor* descriptor =
+      isolate_->code_stub_interface_descriptor(major_key);
+  Handle<Code> miss_ic(descriptor->deoptimization_handler_);
+  output_frame->SetPc(reinterpret_cast<intptr_t>(miss_ic->instruction_start()));
+  unsigned input_frame_size = input_->GetFrameSize();
+  intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
+  output_frame->SetFrameSlot(0, value);
+  value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
+  output_frame->SetRegister(rbp.code(), value);
+  output_frame->SetFp(value);
+  value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
+  output_frame->SetRegister(rsi.code(), value);
+
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  ASSERT(opcode == Translation::REGISTER);
+  USE(opcode);
+  int input_reg = iterator->Next();
+  intptr_t input_value = input_->GetRegister(input_reg);
+  output_frame->SetRegister(rdx.code(), input_value);
+
+  int32_t next = iterator->Next();
+  opcode = static_cast<Translation::Opcode>(next);
+  ASSERT(opcode == Translation::REGISTER);
+  input_reg = iterator->Next();
+  input_value = input_->GetRegister(input_reg);
+  output_frame->SetRegister(rax.code(), input_value);
+
+  ASSERT(frame_index == 0);
+  output_[frame_index] = output_frame;
+}
+
+
 void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
                                               int frame_index) {
   Builtins* builtins = isolate_->builtins();
@@ -866,7 +931,7 @@
   }
   input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
   input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
     input_->SetDoubleRegister(i, 0.0);
   }
 
@@ -886,10 +951,10 @@
   const int kNumberOfRegisters = Register::kNumRegisters;
 
   const int kDoubleRegsSize = kDoubleSize *
-                              XMMRegister::kNumAllocatableRegisters;
+      XMMRegister::NumAllocatableRegisters();
   __ subq(rsp, Immediate(kDoubleRegsSize));
 
-  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
     XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
     int offset = i * kDoubleSize;
     __ movsd(Operand(rsp, offset), xmm_reg);
@@ -978,7 +1043,7 @@
 
   // Fill in the double input registers.
   int double_regs_offset = FrameDescription::double_registers_offset();
-  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+  for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
     int dst_offset = i * kDoubleSize + double_regs_offset;
     __ pop(Operand(rbx, dst_offset));
   }
@@ -999,10 +1064,13 @@
   // limit and copy the contents of the activation frame to the input
   // frame description.
   __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
+  Label pop_loop_header;
+  __ jmp(&pop_loop_header);
   Label pop_loop;
   __ bind(&pop_loop);
   __ pop(Operand(rdx, 0));
   __ addq(rdx, Immediate(sizeof(intptr_t)));
+  __ bind(&pop_loop_header);
   __ cmpq(rcx, rsp);
   __ j(not_equal, &pop_loop);
 
@@ -1019,28 +1087,33 @@
   __ pop(rax);
 
   // Replace the current frame with the output frames.
-  Label outer_push_loop, inner_push_loop;
+  Label outer_push_loop, inner_push_loop,
+      outer_loop_header, inner_loop_header;
   // Outer loop state: rax = current FrameDescription**, rdx = one past the
   // last FrameDescription**.
   __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
   __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
   __ lea(rdx, Operand(rax, rdx, times_8, 0));
+  __ jmp(&outer_loop_header);
   __ bind(&outer_push_loop);
   // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
   __ movq(rbx, Operand(rax, 0));
   __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+  __ jmp(&inner_loop_header);
   __ bind(&inner_push_loop);
   __ subq(rcx, Immediate(sizeof(intptr_t)));
   __ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
+  __ bind(&inner_loop_header);
   __ testq(rcx, rcx);
   __ j(not_zero, &inner_push_loop);
   __ addq(rax, Immediate(kPointerSize));
+  __ bind(&outer_loop_header);
   __ cmpq(rax, rdx);
   __ j(below, &outer_push_loop);
 
   // In case of OSR, we have to restore the XMM registers.
   if (type() == OSR) {
-    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+    for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
       XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
       int src_offset = i * kDoubleSize + double_regs_offset;
       __ movsd(xmm_reg, Operand(rbx, src_offset));
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 68773e9..e0aeb8e 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1965,7 +1965,7 @@
 
 
 void FullCodeGenerator::EmitAssignment(Expression* expr) {
-  // Invalid left-hand sides are rewritten to have a 'throw
+  // Invalid left-hand sides are rewritten by the parser to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
     VisitForEffect(expr);
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index c69b27c..20a34b3 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -119,35 +119,45 @@
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
 
-  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+  if (info()->IsOptimizing()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
 #ifdef DEBUG
-  if (strlen(FLAG_stop_at) > 0 &&
-      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
-    __ int3();
-  }
+    if (strlen(FLAG_stop_at) > 0 &&
+        info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+      __ int3();
+    }
 #endif
 
-  // Strict mode functions need to replace the receiver with undefined
-  // when called as functions (without an explicit receiver
-  // object). rcx is zero for method calls and non-zero for function
-  // calls.
-  if (!info_->is_classic_mode() || info_->is_native()) {
-    Label ok;
-    __ testq(rcx, rcx);
-    __ j(zero, &ok, Label::kNear);
-    // +1 for return address.
-    int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
-    __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
-    __ movq(Operand(rsp, receiver_offset), kScratchRegister);
-    __ bind(&ok);
+    // Strict mode functions need to replace the receiver with undefined
+    // when called as functions (without an explicit receiver
+    // object). rcx is zero for method calls and non-zero for function
+    // calls.
+    if (!info_->is_classic_mode() || info_->is_native()) {
+      Label ok;
+      __ testq(rcx, rcx);
+      __ j(zero, &ok, Label::kNear);
+      // +1 for return address.
+      int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+      __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+      __ movq(Operand(rsp, receiver_offset), kScratchRegister);
+      __ bind(&ok);
+    }
   }
 
   info()->set_prologue_offset(masm_->pc_offset());
-  __ push(rbp);  // Caller's frame pointer.
-  __ movq(rbp, rsp);
-  __ push(rsi);  // Callee's context.
-  __ push(rdi);  // Callee's JS function.
+  if (NeedsEagerFrame()) {
+    ASSERT(!frame_is_built_);
+    frame_is_built_ = true;
+    __ push(rbp);  // Caller's frame pointer.
+    __ movq(rbp, rsp);
+    __ push(rsi);  // Callee's context.
+    if (info()->IsStub()) {
+      __ Push(Smi::FromInt(StackFrame::STUB));
+    } else {
+      __ push(rdi);  // Callee's JS function.
+    }
+  }
 
   // Reserve space for the stack slots needed by the code.
   int slots = GetStackSlotCount();
@@ -177,7 +187,7 @@
   }
 
   // Possibly allocate a local context.
-  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment(";;; Allocate local context");
     // Argument to NewContext is the function, which is still in rdi.
@@ -213,7 +223,7 @@
   }
 
   // Trace the call.
-  if (FLAG_trace) {
+  if (FLAG_trace && info()->IsOptimizing()) {
     __ CallRuntime(Runtime::kTraceEnter, 0);
   }
   return !is_aborted();
@@ -266,9 +276,55 @@
 
 
 bool LCodeGen::GenerateJumpTable() {
+  Label needs_frame_not_call;
+  Label needs_frame_is_call;
   for (int i = 0; i < jump_table_.length(); i++) {
     __ bind(&jump_table_[i].label);
-    __ Jump(jump_table_[i].address, RelocInfo::RUNTIME_ENTRY);
+    Address entry = jump_table_[i].address;
+    if (jump_table_[i].needs_frame) {
+      __ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
+      if (jump_table_[i].is_lazy_deopt) {
+        if (needs_frame_is_call.is_bound()) {
+          __ jmp(&needs_frame_is_call);
+        } else {
+          __ bind(&needs_frame_is_call);
+          __ push(rbp);
+          __ movq(rbp, rsp);
+          __ push(rsi);
+          // This variant of deopt can only be used with stubs. Since we don't
+          // have a function pointer to install in the stack frame that we're
+          // building, install a special marker there instead.
+          ASSERT(info()->IsStub());
+          __ Move(rsi, Smi::FromInt(StackFrame::STUB));
+          __ push(rsi);
+          __ movq(rsi, MemOperand(rsp, kPointerSize));
+          __ call(kScratchRegister);
+        }
+      } else {
+        if (needs_frame_not_call.is_bound()) {
+          __ jmp(&needs_frame_not_call);
+        } else {
+          __ bind(&needs_frame_not_call);
+          __ push(rbp);
+          __ movq(rbp, rsp);
+          __ push(rsi);
+          // This variant of deopt can only be used with stubs. Since we don't
+          // have a function pointer to install in the stack frame that we're
+          // building, install a special marker there instead.
+          ASSERT(info()->IsStub());
+          __ Move(rsi, Smi::FromInt(StackFrame::STUB));
+          __ push(rsi);
+          __ movq(rsi, MemOperand(rsp, kPointerSize));
+          __ jmp(kScratchRegister);
+        }
+      }
+    } else {
+      if (jump_table_[i].is_lazy_deopt) {
+        __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+      } else {
+        __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+      }
+    }
   }
   return !is_aborted();
 }
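
DeoptimizeIf (further down) appends to this jump table and reuses the last
entry only when the target address, needs_frame and is_lazy_deopt all match.
A small standalone restatement of that reuse test (types are stand-ins):

    #include <cstdio>

    typedef unsigned char* Address;

    struct JumpTableEntry {
      Address address;
      bool needs_frame;
      bool is_lazy_deopt;
    };

    // Mirrors the condition in LCodeGen::DeoptimizeIf: add a new entry
    // unless the previous one is identical in all three respects.
    static bool CanReuseLast(const JumpTableEntry& last, Address entry,
                             bool needs_frame, bool is_lazy_deopt) {
      return last.address == entry &&
             last.needs_frame == needs_frame &&
             last.is_lazy_deopt == is_lazy_deopt;
    }

    int main() {
      unsigned char code[2];
      JumpTableEntry last = { code, true, false };
      std::printf("%d\n", CanReuseLast(last, code, true, false));      // 1
      std::printf("%d\n", CanReuseLast(last, code + 1, true, false));  // 0
      return 0;
    }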
@@ -280,10 +336,32 @@
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Deferred build frame @%d: %s.",
+                code->instruction_index(),
+                code->instr()->Mnemonic());
+        ASSERT(!frame_is_built_);
+        ASSERT(info()->IsStub());
+        frame_is_built_ = true;
+        // Build the frame in such a way that rsi isn't trashed.
+        __ push(rbp);  // Caller's frame pointer.
+        __ push(Operand(rbp, StandardFrameConstants::kContextOffset));
+        __ Push(Smi::FromInt(StackFrame::STUB));
+        __ lea(rbp, Operand(rsp, 2 * kPointerSize));
+      }
       Comment(";;; Deferred code @%d: %s.",
               code->instruction_index(),
               code->instr()->Mnemonic());
       code->Generate();
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Deferred destroy frame @%d: %s.",
+                code->instruction_index(),
+                code->instr()->Mnemonic());
+        ASSERT(frame_is_built_);
+        frame_is_built_ = false;
+        __ movq(rsp, rbp);
+        __ pop(rbp);
+      }
       __ jmp(code->exit());
     }
   }
@@ -396,7 +474,9 @@
                    translation,
                    arguments_index,
                    arguments_count);
-  int closure_id = *info()->closure() != *environment->closure()
+  bool has_closure_id = !info()->closure().is_null() &&
+      *info()->closure() != *environment->closure();
+  int closure_id = has_closure_id
       ? DefineDeoptimizationLiteral(environment->closure())
       : Translation::kSelfLiteralId;
 
@@ -420,6 +500,9 @@
     case ARGUMENTS_ADAPTOR:
       translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
       break;
+    case STUB:
+      translation->BeginCompiledStubFrame();
+      break;
   }
 
   // Inlined frames which push their arguments cause the index to be
@@ -610,20 +693,33 @@
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
-  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+  ASSERT(info()->IsOptimizing() || info()->IsStub());
+  Deoptimizer::BailoutType bailout_type = info()->IsStub()
+      ? Deoptimizer::LAZY
+      : Deoptimizer::EAGER;
+  Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
   if (entry == NULL) {
     Abort("bailout was not prepared");
     return;
   }
 
+  ASSERT(info()->IsStub() || frame_is_built_);
+  bool lazy_deopt = info()->IsStub();
   if (cc == no_condition) {
-    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+    if (lazy_deopt) {
+      __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+    } else {
+      __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+    }
   } else {
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
     if (jump_table_.is_empty() ||
-        jump_table_.last().address != entry) {
-      jump_table_.Add(JumpTableEntry(entry), zone());
+        jump_table_.last().address != entry ||
+        jump_table_.last().needs_frame != !frame_is_built_ ||
+        jump_table_.last().is_lazy_deopt != lazy_deopt) {
+      JumpTableEntry table_entry(entry, !frame_is_built_, lazy_deopt);
+      jump_table_.Add(table_entry, zone());
     }
     __ j(cc, &jump_table_.last().label);
   }
@@ -920,6 +1016,17 @@
 
     // Slow case, using idiv instruction.
     __ bind(&slow);
+
+    // Check for (kMinInt % -1).
+    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+      Label left_not_min_int;
+      __ cmpl(left_reg, Immediate(kMinInt));
+      __ j(not_zero, &left_not_min_int, Label::kNear);
+      __ cmpl(right_reg, Immediate(-1));
+      DeoptimizeIf(zero, instr->environment());
+      __ bind(&left_not_min_int);
+    }
+
     // Sign extend eax to edx.
     // (We are using only the low 32 bits of the values.)
     __ cdq();
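
The guard exists because idiv faults on kMinInt / -1: the quotient 2^31 does
not fit in 32 bits, and the corresponding C++ expression is undefined
behavior, so the only safe option is to deoptimize. A minimal illustration of
the same check:

    #include <climits>
    #include <cstdio>

    int main() {
      int left = INT_MIN, right = -1;
      // left / right and left % right may not be evaluated; guard first,
      // exactly as the generated code above does.
      if (left == INT_MIN && right == -1) {
        std::puts("would overflow: deoptimize");
      } else {
        std::printf("%d\n", left % right);
      }
      return 0;
    }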
@@ -2334,15 +2441,22 @@
 
 
 void LCodeGen::DoReturn(LReturn* instr) {
-  if (FLAG_trace) {
+  if (FLAG_trace && info()->IsOptimizing()) {
     // Preserve the return value on the stack and rely on the runtime
     // call to return the value in the same register.
     __ push(rax);
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
-  __ movq(rsp, rbp);
-  __ pop(rbp);
-  __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
+  if (NeedsEagerFrame()) {
+    __ movq(rsp, rbp);
+    __ pop(rbp);
+  }
+  if (info()->IsStub()) {
+    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+    __ Ret(0, r10);
+  } else {
+    __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
+  }
 }
 
 
@@ -4573,10 +4687,10 @@
 void LCodeGen::DoCheckMapCommon(Register reg,
                                 Handle<Map> map,
                                 CompareMapMode mode,
-                                LEnvironment* env) {
+                                LInstruction* instr) {
   Label success;
   __ CompareMap(reg, map, &success, mode);
-  DeoptimizeIf(not_equal, env);
+  DeoptimizeIf(not_equal, instr->environment());
   __ bind(&success);
 }
 
@@ -4594,7 +4708,7 @@
     __ j(equal, &success);
   }
   Handle<Map> map = map_set->last();
-  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
+  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
   __ bind(&success);
 }
 
@@ -4661,7 +4775,7 @@
   // Check prototype maps up to the holder.
   while (!current_prototype.is_identical_to(holder)) {
     DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
-                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+                     ALLOW_ELEMENT_TRANSITION_MAPS, instr);
     current_prototype =
         Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
     // Load next prototype object.
@@ -4670,7 +4784,7 @@
 
   // Check the holder map.
     DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
-                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+                     ALLOW_ELEMENT_TRANSITION_MAPS, instr);
 }
 
 
@@ -5206,6 +5320,7 @@
 
 
 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+  if (info()->IsStub()) return;
   // Ensure that we have enough space after the previous lazy-bailout
   // instruction for patching the code here.
   int current_pc = masm()->pc_offset();
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index e068f14..2fa10e1 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -63,6 +63,7 @@
         deferred_(8, info->zone()),
         osr_pc_offset_(-1),
         last_lazy_deopt_pc_(0),
+        frame_is_built_(false),
         safepoints_(info->zone()),
         resolver_(this),
         expected_safepoint_kind_(Safepoint::kSimple) {
@@ -77,6 +78,15 @@
   Heap* heap() const { return isolate()->heap(); }
   Zone* zone() const { return zone_; }
 
+  bool NeedsEagerFrame() const {
+    return GetStackSlotCount() > 0 ||
+        info()->is_non_deferred_calling() ||
+        !info()->IsStub();
+  }
+  bool NeedsDeferredFrame() const {
+    return !NeedsEagerFrame() && info()->is_deferred_calling();
+  }
+
   // Support for converting LOperands to assembler types.
   Register ToRegister(LOperand* op) const;
   XMMRegister ToDoubleRegister(LOperand* op) const;
@@ -110,7 +120,7 @@
                                        Label* map_check);
 
   void DoCheckMapCommon(Register reg, Handle<Map> map,
-                        CompareMapMode mode, LEnvironment* env);
+                        CompareMapMode mode, LInstruction* instr);
 
 // Parallel move support.
   void DoParallelMove(LParallelMove* move);
@@ -158,7 +168,7 @@
                        Register scratch);
 
   int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
-  int GetParameterCount() const { return scope()->num_parameters(); }
+  int GetParameterCount() const { return info()->num_parameters(); }
 
   void Abort(const char* reason);
   void Comment(const char* format, ...);
@@ -327,11 +337,15 @@
                     int* offset);
 
   struct JumpTableEntry {
-    explicit inline JumpTableEntry(Address entry)
+    inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
         : label(),
-          address(entry) { }
+          address(entry),
+          needs_frame(frame),
+          is_lazy_deopt(is_lazy) { }
     Label label;
     Address address;
+    bool needs_frame;
+    bool is_lazy_deopt;
   };
 
   void EnsureSpaceForLazyDeopt(int space_needed);
@@ -360,6 +374,7 @@
   ZoneList<LDeferredCode*> deferred_;
   int osr_pc_offset_;
   int last_lazy_deopt_pc_;
+  bool frame_is_built_;
 
   // Builder that keeps track of safepoints in the code. The table
   // itself is emitted at the end of the generated code.
@@ -374,6 +389,7 @@
    public:
     explicit PushSafepointRegistersScope(LCodeGen* codegen)
         : codegen_(codegen) {
+      ASSERT(codegen_->info()->is_calling());
       ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
       codegen_->masm_->PushSafepointRegisters();
       codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 81228ce..574970c 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -44,10 +44,10 @@
 #undef DEFINE_COMPILE
 
 LOsrEntry::LOsrEntry() {
-  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
     register_spills_[i] = NULL;
   }
-  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
     double_register_spills_[i] = NULL;
   }
 }
@@ -619,6 +619,8 @@
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
+  info()->MarkAsNonDeferredCalling();
+
 #ifdef DEBUG
   instr->VerifyCall();
 #endif
@@ -1635,8 +1637,12 @@
 LInstruction* LChunkBuilder::DoChange(HChange* instr) {
   Representation from = instr->from();
   Representation to = instr->to();
+  // Only mark conversions that might need to allocate as calling, rather
+  // than all changes, so that simple, non-allocating conversions do not
+  // force the construction of a stack frame.
   if (from.IsTagged()) {
     if (to.IsDouble()) {
+      info()->MarkAsDeferredCalling();
       LOperand* value = UseRegister(instr->value());
       LNumberUntagD* res = new(zone()) LNumberUntagD(value);
       return AssignEnvironment(DefineAsRegister(res));
@@ -1654,6 +1660,7 @@
     }
   } else if (from.IsDouble()) {
     if (to.IsTagged()) {
+      info()->MarkAsDeferredCalling();
       LOperand* value = UseRegister(instr->value());
       LOperand* temp = TempRegister();
 
@@ -1667,6 +1674,7 @@
       return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToI(value)));
     }
   } else if (from.IsInteger32()) {
+    info()->MarkAsDeferredCalling();
     if (to.IsTagged()) {
       HValue* val = instr->value();
       LOperand* value = UseRegister(val);
@@ -2133,8 +2141,17 @@
 
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
-  int spill_index = chunk()->GetParameterStackSlot(instr->index());
-  return DefineAsSpilled(new(zone()) LParameter, spill_index);
+  LParameter* result = new(zone()) LParameter;
+  if (info()->IsOptimizing()) {
+    int spill_index = chunk()->GetParameterStackSlot(instr->index());
+    return DefineAsSpilled(result, spill_index);
+  } else {
+    ASSERT(info()->IsStub());
+    CodeStubInterfaceDescriptor* descriptor =
+        info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+    Register reg = descriptor->register_params_[instr->index()];
+    return DefineFixed(result, reg);
+  }
 }
 
 
@@ -2230,6 +2247,7 @@
 
 
 LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  info()->MarkAsDeferredCalling();
   if (instr->is_function_entry()) {
     return MarkAsCall(new(zone()) LStackCheck, instr);
   } else {
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 4a909a1..f5f0250 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -252,6 +252,11 @@
 
   void MarkAsCall() { is_call_ = true; }
 
+  // Interface to the register allocator and iterators.
+  bool ClobbersTemps() const { return is_call_; }
+  bool ClobbersRegisters() const { return is_call_; }
+  bool ClobbersDoubleRegisters() const { return is_call_; }
+
   virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
 
   // Interface to the register allocator and iterators.
@@ -2291,8 +2296,9 @@
   // slot, i.e., that must also be restored to the spill slot on OSR entry.
   // NULL if the register has no assigned spill slot.  Indexed by allocation
   // index.
-  LOperand* register_spills_[Register::kNumAllocatableRegisters];
-  LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+  LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
+  LOperand* double_register_spills_[
+      DoubleRegister::kMaxNumAllocatableRegisters];
 };
 
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 4e4f2c5..8513a68 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -3432,7 +3432,7 @@
         arg_stack_space * kPointerSize;
     subq(rsp, Immediate(space));
     int offset = -2 * kPointerSize;
-    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+    for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
       XMMRegister reg = XMMRegister::FromAllocationIndex(i);
       movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
     }
@@ -3476,7 +3476,7 @@
   // r15 : argv
   if (save_doubles) {
     int offset = -2 * kPointerSize;
-    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+    for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
       XMMRegister reg = XMMRegister::FromAllocationIndex(i);
       movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
     }
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 0d8d6f2..1d56435 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -1414,9 +1414,9 @@
     return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
   }
 
-  // Needs access to SafepointRegisterStackIndex for optimized frame
+  // Needs access to SafepointRegisterStackIndex for compiled frame
   // traversal.
-  friend class OptimizedFrame;
+  friend class StandardFrame;
 };
 
 
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 683aa9d..0329966 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -3210,12 +3210,19 @@
   //  -- rsp[0] : return address
   // -----------------------------------
   ElementsKind elements_kind = receiver_map->elements_kind();
-  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
+  if (receiver_map->has_fast_elements() ||
+      receiver_map->has_external_array_elements()) {
+    Handle<Code> stub = KeyedLoadFastElementStub(
+        receiver_map->instance_type() == JS_ARRAY_TYPE,
+        elements_kind).GetCode();
+    __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
+  } else {
+    Handle<Code> stub =
+        KeyedLoadDictionaryElementStub().GetCode();
+    __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
+  }
 
-  __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
-
-  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
-  __ jmp(ic, RelocInfo::CODE_TARGET);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
   return GetCode(Code::NORMAL, factory()->empty_string());
@@ -3457,140 +3464,6 @@
 }
 
 
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
-    MacroAssembler* masm,
-    ElementsKind elements_kind) {
-  // ----------- S t a t e -------------
-  //  -- rax    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  Label slow, miss_force_generic;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
-
-  // Check that the index is in range.
-  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ SmiToInteger32(rcx, rax);
-  __ cmpq(rax, FieldOperand(rbx, ExternalArray::kLengthOffset));
-  // Unsigned comparison catches both negative and too-large values.
-  __ j(above_equal, &miss_force_generic);
-
-  // rax: index (as a smi)
-  // rdx: receiver (JSObject)
-  // rcx: untagged index
-  // rbx: elements array
-  __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
-  // rbx: base pointer of external storage
-  switch (elements_kind) {
-    case EXTERNAL_BYTE_ELEMENTS:
-      __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
-      break;
-    case EXTERNAL_PIXEL_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
-      break;
-    case EXTERNAL_SHORT_ELEMENTS:
-      __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
-      break;
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
-      break;
-    case EXTERNAL_INT_ELEMENTS:
-      __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
-      break;
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-      __ movl(rcx, Operand(rbx, rcx, times_4, 0));
-      break;
-    case EXTERNAL_FLOAT_ELEMENTS:
-      __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
-      break;
-    case EXTERNAL_DOUBLE_ELEMENTS:
-      __ movsd(xmm0, Operand(rbx, rcx, times_8, 0));
-      break;
-    default:
-      UNREACHABLE();
-      break;
-  }
-
-  // rax: index
-  // rdx: receiver
-  // For integer array types:
-  // rcx: value
-  // For floating-point array type:
-  // xmm0: value as double.
-
-  ASSERT(kSmiValueSize == 32);
-  if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
-    // For the UnsignedInt array type, we need to see whether
-    // the value can be represented in a Smi. If not, we need to convert
-    // it to a HeapNumber.
-    Label box_int;
-
-    __ JumpIfUIntNotValidSmiValue(rcx, &box_int, Label::kNear);
-
-    __ Integer32ToSmi(rax, rcx);
-    __ ret(0);
-
-    __ bind(&box_int);
-
-    // Allocate a HeapNumber for the int and perform int-to-double
-    // conversion.
-    // The value is zero-extended since we loaded the value from memory
-    // with movl.
-    __ cvtqsi2sd(xmm0, rcx);
-
-    __ AllocateHeapNumber(rcx, rbx, &slow);
-    // Set the value.
-    __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
-    __ movq(rax, rcx);
-    __ ret(0);
-  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
-             elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-    // For the floating-point array type, we need to always allocate a
-    // HeapNumber.
-    __ AllocateHeapNumber(rcx, rbx, &slow);
-    // Set the value.
-    __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
-    __ movq(rax, rcx);
-    __ ret(0);
-  } else {
-    __ Integer32ToSmi(rax, rcx);
-    __ ret(0);
-  }
-
-  // Slow case: Jump to runtime.
-  __ bind(&slow);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
-
-  // ----------- S t a t e -------------
-  //  -- rax    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0]  : return address
-  // -----------------------------------
-
-  Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
-  __ jmp(ic, RelocInfo::CODE_TARGET);
-
-  // Miss case: Jump to runtime.
-  __ bind(&miss_force_generic);
-
-  // ----------- S t a t e -------------
-  //  -- rax    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0]  : return address
-  // -----------------------------------
-  Handle<Code> miss_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
 void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     MacroAssembler* masm,
     ElementsKind elements_kind) {
@@ -3780,98 +3653,6 @@
 }
 
 
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  Label miss_force_generic;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
-
-  // Get the elements array.
-  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ AssertFastElements(rcx);
-
-  // Check that the key is within bounds.
-  __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
-  __ j(above_equal, &miss_force_generic);
-
-  // Load the result and make sure it's not the hole.
-  SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
-  __ movq(rbx, FieldOperand(rcx,
-                            index.reg,
-                            index.scale,
-                            FixedArray::kHeaderSize));
-  __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
-  __ j(equal, &miss_force_generic);
-  __ movq(rax, rbx);
-  __ ret(0);
-
-  __ bind(&miss_force_generic);
-  Code* code = masm->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_MissForceGeneric);
-  Handle<Code> ic(code);
-  __ jmp(ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
-    MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  Label miss_force_generic, slow_allocate_heapnumber;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
-
-  // Get the elements array.
-  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ AssertFastElements(rcx);
-
-  // Check that the key is within bounds.
-  __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
-  __ j(above_equal, &miss_force_generic);
-
-  // Check for the hole
-  __ SmiToInteger32(kScratchRegister, rax);
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ cmpl(FieldOperand(rcx, kScratchRegister, times_8, offset),
-          Immediate(kHoleNanUpper32));
-  __ j(equal, &miss_force_generic);
-
-  // Always allocate a heap number for the result.
-  __ movsd(xmm0, FieldOperand(rcx, kScratchRegister, times_8,
-                              FixedDoubleArray::kHeaderSize));
-  __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
-  // Set the value.
-  __ movq(rax, rcx);
-  __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
-  __ ret(0);
-
-  __ bind(&slow_allocate_heapnumber);
-  Handle<Code> slow_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_Slow();
-  __ jmp(slow_ic, RelocInfo::CODE_TARGET);
-
-  __ bind(&miss_force_generic);
-  Handle<Code> miss_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
 void KeyedStoreStubCompiler::GenerateStoreFastElement(
     MacroAssembler* masm,
     bool is_js_array,
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 0a5583b..a30fbda 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -853,6 +853,38 @@
 }
 
 
+THREADED_TEST(FunctionTemplateSetLength) {
+  v8::HandleScope scope;
+  LocalContext env;
+  {
+    Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(
+        handle_call, Handle<v8::Value>(), Handle<v8::Signature>(), 23);
+    Local<Function> fun = fun_templ->GetFunction();
+    env->Global()->Set(v8_str("obj"), fun);
+    Local<Script> script = v8_compile("obj.length");
+    CHECK_EQ(23, script->Run()->Int32Value());
+  }
+  {
+    Local<v8::FunctionTemplate> fun_templ =
+        v8::FunctionTemplate::New(handle_call);
+    fun_templ->SetLength(22);
+    Local<Function> fun = fun_templ->GetFunction();
+    env->Global()->Set(v8_str("obj"), fun);
+    Local<Script> script = v8_compile("obj.length");
+    CHECK_EQ(22, script->Run()->Int32Value());
+  }
+  {
+    // Without setting length it defaults to 0.
+    Local<v8::FunctionTemplate> fun_templ =
+        v8::FunctionTemplate::New(handle_call);
+    Local<Function> fun = fun_templ->GetFunction();
+    env->Global()->Set(v8_str("obj"), fun);
+    Local<Script> script = v8_compile("obj.length");
+    CHECK_EQ(0, script->Run()->Int32Value());
+  }
+}
+
+
 static void* expected_ptr;
 static v8::Handle<v8::Value> callback(const v8::Arguments& args) {
   void* ptr = v8::External::Unwrap(args.Data());
@@ -8201,6 +8233,66 @@
 }
 
 
+THREADED_TEST(HiddenPrototypeSet) {
+  v8::HandleScope handle_scope;
+  LocalContext context;
+
+  Local<v8::FunctionTemplate> ot = v8::FunctionTemplate::New();
+  Local<v8::FunctionTemplate> ht = v8::FunctionTemplate::New();
+  ht->SetHiddenPrototype(true);
+  Local<v8::FunctionTemplate> pt = v8::FunctionTemplate::New();
+  ht->InstanceTemplate()->Set(v8_str("x"), v8_num(0));
+
+  Local<v8::Object> o = ot->GetFunction()->NewInstance();
+  Local<v8::Object> h = ht->GetFunction()->NewInstance();
+  Local<v8::Object> p = pt->GetFunction()->NewInstance();
+  o->Set(v8_str("__proto__"), h);
+  h->Set(v8_str("__proto__"), p);
+
+  // Setting a property that exists on the hidden prototype goes there.
+  o->Set(v8_str("x"), v8_num(7));
+  CHECK_EQ(7, o->Get(v8_str("x"))->Int32Value());
+  CHECK_EQ(7, h->Get(v8_str("x"))->Int32Value());
+  CHECK(p->Get(v8_str("x"))->IsUndefined());
+
+  // Setting a new property should not be forwarded to the hidden prototype.
+  o->Set(v8_str("y"), v8_num(6));
+  CHECK_EQ(6, o->Get(v8_str("y"))->Int32Value());
+  CHECK(h->Get(v8_str("y"))->IsUndefined());
+  CHECK(p->Get(v8_str("y"))->IsUndefined());
+
+  // Setting a property that only exists on a prototype of the hidden prototype
+  // is treated normally again.
+  p->Set(v8_str("z"), v8_num(8));
+  CHECK_EQ(8, o->Get(v8_str("z"))->Int32Value());
+  CHECK_EQ(8, h->Get(v8_str("z"))->Int32Value());
+  CHECK_EQ(8, p->Get(v8_str("z"))->Int32Value());
+  o->Set(v8_str("z"), v8_num(9));
+  CHECK_EQ(9, o->Get(v8_str("z"))->Int32Value());
+  CHECK_EQ(8, h->Get(v8_str("z"))->Int32Value());
+  CHECK_EQ(8, p->Get(v8_str("z"))->Int32Value());
+}
+
+
+// Regression test for issue 2457.
+THREADED_TEST(HiddenPrototypeIdentityHash) {
+  v8::HandleScope handle_scope;
+  LocalContext context;
+
+  Handle<FunctionTemplate> t = FunctionTemplate::New();
+  t->SetHiddenPrototype(true);
+  t->InstanceTemplate()->Set(v8_str("foo"), v8_num(75));
+  Handle<Object> p = t->GetFunction()->NewInstance();
+  Handle<Object> o = Object::New();
+  o->SetPrototype(p);
+
+  int hash = o->GetIdentityHash();
+  USE(hash);
+  o->Set(v8_str("foo"), v8_num(42));
+  ASSERT_EQ(hash, o->GetIdentityHash());
+}
+
+
 THREADED_TEST(SetPrototype) {
   v8::HandleScope handle_scope;
   LocalContext context;
diff --git a/test/cctest/test-compiler.cc b/test/cctest/test-compiler.cc
index 807adbf..0cca0c4 100644
--- a/test/cctest/test-compiler.cc
+++ b/test/cctest/test-compiler.cc
@@ -94,7 +94,7 @@
 
 
 static MaybeObject* GetGlobalProperty(const char* name) {
-  Handle<String> symbol = FACTORY->LookupAsciiSymbol(name);
+  Handle<String> symbol = FACTORY->LookupUtf8Symbol(name);
   return Isolate::Current()->context()->global_object()->GetProperty(*symbol);
 }
 
@@ -102,7 +102,7 @@
 static void SetGlobalProperty(const char* name, Object* value) {
   Isolate* isolate = Isolate::Current();
   Handle<Object> object(value);
-  Handle<String> symbol = FACTORY->LookupAsciiSymbol(name);
+  Handle<String> symbol = FACTORY->LookupUtf8Symbol(name);
   Handle<JSObject> global(Isolate::Current()->context()->global_object());
   SetProperty(isolate, global, symbol, object, NONE, kNonStrictMode);
 }
@@ -294,13 +294,16 @@
   Execution::Call(fun0, global, 0, NULL, &has_pending_exception);
   CHECK(!has_pending_exception);
 
-  Object* foo_symbol = FACTORY->LookupAsciiSymbol("foo")->ToObjectChecked();
+  Object* foo_symbol =
+      FACTORY->LookupOneByteSymbol(STATIC_ASCII_VECTOR("foo"))->
+        ToObjectChecked();
   MaybeObject* fun1_object = Isolate::Current()->context()->global_object()->
       GetProperty(String::cast(foo_symbol));
   Handle<Object> fun1(fun1_object->ToObjectChecked());
   CHECK(fun1->IsJSFunction());
 
-  Handle<Object> argv[] = { FACTORY->LookupAsciiSymbol("hello") };
+  Handle<Object> argv[] =
+    { FACTORY->LookupOneByteSymbol(STATIC_ASCII_VECTOR("hello")) };
   Execution::Call(Handle<JSFunction>::cast(fun1),
                   global,
                   ARRAY_SIZE(argv),
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 941fa68..21f7993 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -156,7 +156,7 @@
     Handle<JSGlobalProxy> global(Handle<JSGlobalProxy>::cast(
         v8::Utils::OpenHandle(*context_->Global())));
     Handle<v8::internal::String> debug_string =
-        FACTORY->LookupAsciiSymbol("debug");
+        FACTORY->LookupOneByteSymbol(STATIC_ASCII_VECTOR("debug"));
     SetProperty(isolate, global, debug_string,
         Handle<Object>(debug->debug_context()->global_proxy()), DONT_ENUM,
         ::v8::internal::kNonStrictMode);
diff --git a/test/cctest/test-decls.cc b/test/cctest/test-decls.cc
index 824c4e7..d379fbb 100644
--- a/test/cctest/test-decls.cc
+++ b/test/cctest/test-decls.cc
@@ -161,6 +161,7 @@
       CHECK_EQ(value, catcher.Exception());
     }
   }
+  HEAP->CollectAllAvailableGarbage();  // Clean slate for the next test.
 }
 
 
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index da09505..f773a77 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -442,7 +442,8 @@
   }
 
   {
-    if (CpuFeatures::IsSupported(SSE4_1)) {
+    if (CpuFeatures::IsSupported(SSE2) &&
+        CpuFeatures::IsSupported(SSE4_1)) {
       CpuFeatures::Scope scope(SSE4_1);
       __ pextrd(eax, xmm0, 1);
       __ pinsrd(xmm1, eax, 0);
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index 4a65344..8da841e 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -1254,56 +1254,6 @@
 }
 
 
-TEST(DocumentURL) {
-  v8::HandleScope scope;
-  LocalContext env;
-
-  CompileRun("document = { URL:\"abcdefgh\" };");
-
-  const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8_str("document"));
-  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
-  CHECK_NE(NULL, global);
-  CHECK_EQ("Object / abcdefgh",
-           const_cast<i::HeapEntry*>(
-               reinterpret_cast<const i::HeapEntry*>(global))->name());
-}
-
-
-TEST(DocumentWithException) {
-  v8::HandleScope scope;
-  LocalContext env;
-
-  CompileRun(
-      "this.__defineGetter__(\"document\", function() { throw new Error(); })");
-  const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8_str("document"));
-  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
-  CHECK_NE(NULL, global);
-  CHECK_EQ("Object",
-           const_cast<i::HeapEntry*>(
-               reinterpret_cast<const i::HeapEntry*>(global))->name());
-}
-
-
-TEST(DocumentURLWithException) {
-  v8::HandleScope scope;
-  LocalContext env;
-
-  CompileRun(
-      "function URLWithException() {}\n"
-      "URLWithException.prototype = { get URL() { throw new Error(); } };\n"
-      "document = { URL: new URLWithException() };");
-  const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8_str("document"));
-  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
-  CHECK_NE(NULL, global);
-  CHECK_EQ("Object",
-           const_cast<i::HeapEntry*>(
-               reinterpret_cast<const i::HeapEntry*>(global))->name());
-}
-
-
 TEST(NoHandleLeaks) {
   v8::HandleScope scope;
   LocalContext env;
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 93ac211..1e43d8b 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -218,10 +218,10 @@
   // Check GC.
   HEAP->CollectGarbage(NEW_SPACE);
 
-  Handle<String> name = FACTORY->LookupAsciiSymbol("theFunction");
-  Handle<String> prop_name = FACTORY->LookupAsciiSymbol("theSlot");
-  Handle<String> prop_namex = FACTORY->LookupAsciiSymbol("theSlotx");
-  Handle<String> obj_name = FACTORY->LookupAsciiSymbol("theObject");
+  Handle<String> name = FACTORY->LookupUtf8Symbol("theFunction");
+  Handle<String> prop_name = FACTORY->LookupUtf8Symbol("theSlot");
+  Handle<String> prop_namex = FACTORY->LookupUtf8Symbol("theSlotx");
+  Handle<String> obj_name = FACTORY->LookupUtf8Symbol("theObject");
 
   {
     v8::HandleScope inner_scope;
@@ -536,12 +536,12 @@
 static void CheckSymbols(const char** strings) {
   for (const char* string = *strings; *strings != 0; string = *strings++) {
     Object* a;
-    MaybeObject* maybe_a = HEAP->LookupAsciiSymbol(string);
-    // LookupAsciiSymbol may return a failure if a GC is needed.
+    MaybeObject* maybe_a = HEAP->LookupUtf8Symbol(string);
+    // LookupUtf8Symbol may return a failure if a GC is needed.
     if (!maybe_a->ToObject(&a)) continue;
     CHECK(a->IsSymbol());
     Object* b;
-    MaybeObject* maybe_b = HEAP->LookupAsciiSymbol(string);
+    MaybeObject* maybe_b = HEAP->LookupUtf8Symbol(string);
     if (!maybe_b->ToObject(&b)) continue;
     CHECK_EQ(b, a);
     CHECK(String::cast(b)->IsEqualTo(CStrVector(string)));
@@ -561,14 +561,14 @@
   InitializeVM();
 
   v8::HandleScope sc;
-  Handle<String> name = FACTORY->LookupAsciiSymbol("theFunction");
+  Handle<String> name = FACTORY->LookupUtf8Symbol("theFunction");
   Handle<JSFunction> function =
       FACTORY->NewFunction(name, FACTORY->undefined_value());
   Handle<Map> initial_map =
       FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
   function->set_initial_map(*initial_map);
 
-  Handle<String> prop_name = FACTORY->LookupAsciiSymbol("theSlot");
+  Handle<String> prop_name = FACTORY->LookupUtf8Symbol("theSlot");
   Handle<JSObject> obj = FACTORY->NewJSObject(function);
   obj->SetProperty(
       *prop_name, Smi::FromInt(23), NONE, kNonStrictMode)->ToObjectChecked();
@@ -590,8 +590,8 @@
   JSFunction* object_function = JSFunction::cast(raw_object);
   Handle<JSFunction> constructor(object_function);
   Handle<JSObject> obj = FACTORY->NewJSObject(constructor);
-  Handle<String> first = FACTORY->LookupAsciiSymbol("first");
-  Handle<String> second = FACTORY->LookupAsciiSymbol("second");
+  Handle<String> first = FACTORY->LookupUtf8Symbol("first");
+  Handle<String> second = FACTORY->LookupUtf8Symbol("second");
 
   // check for empty
   CHECK(!obj->HasLocalProperty(*first));
@@ -640,12 +640,12 @@
   Handle<String> s1 = FACTORY->NewStringFromAscii(CStrVector(string1));
   obj->SetProperty(
       *s1, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
-  Handle<String> s1_symbol = FACTORY->LookupAsciiSymbol(string1);
+  Handle<String> s1_symbol = FACTORY->LookupUtf8Symbol(string1);
   CHECK(obj->HasLocalProperty(*s1_symbol));
 
   // check symbol and string match
   const char* string2 = "fugl";
-  Handle<String> s2_symbol = FACTORY->LookupAsciiSymbol(string2);
+  Handle<String> s2_symbol = FACTORY->LookupUtf8Symbol(string2);
   obj->SetProperty(
       *s2_symbol, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
   Handle<String> s2 = FACTORY->NewStringFromAscii(CStrVector(string2));
@@ -657,14 +657,14 @@
   InitializeVM();
 
   v8::HandleScope sc;
-  Handle<String> name = FACTORY->LookupAsciiSymbol("theFunction");
+  Handle<String> name = FACTORY->LookupUtf8Symbol("theFunction");
   Handle<JSFunction> function =
       FACTORY->NewFunction(name, FACTORY->undefined_value());
   Handle<Map> initial_map =
       FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
   function->set_initial_map(*initial_map);
 
-  Handle<String> prop_name = FACTORY->LookupAsciiSymbol("theSlot");
+  Handle<String> prop_name = FACTORY->LookupUtf8Symbol("theSlot");
   Handle<JSObject> obj = FACTORY->NewJSObject(function);
 
   // Set a property
@@ -681,7 +681,7 @@
   InitializeVM();
 
   v8::HandleScope sc;
-  Handle<String> name = FACTORY->LookupAsciiSymbol("Array");
+  Handle<String> name = FACTORY->LookupUtf8Symbol("Array");
   Object* raw_object = Isolate::Current()->context()->global_object()->
       GetProperty(*name)->ToObjectChecked();
   Handle<JSFunction> function = Handle<JSFunction>(
@@ -734,8 +734,8 @@
   JSFunction* object_function = JSFunction::cast(raw_object);
   Handle<JSFunction> constructor(object_function);
   Handle<JSObject> obj = FACTORY->NewJSObject(constructor);
-  Handle<String> first = FACTORY->LookupAsciiSymbol("first");
-  Handle<String> second = FACTORY->LookupAsciiSymbol("second");
+  Handle<String> first = FACTORY->LookupUtf8Symbol("first");
+  Handle<String> second = FACTORY->LookupUtf8Symbol("second");
 
   obj->SetProperty(
       *first, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
@@ -790,10 +790,10 @@
       non_ascii[3 * i + 2] = chars[2];
     }
     Handle<String> non_ascii_sym =
-        FACTORY->LookupSymbol(Vector<const char>(non_ascii, 3 * length));
+        FACTORY->LookupUtf8Symbol(Vector<const char>(non_ascii, 3 * length));
     CHECK_EQ(length, non_ascii_sym->length());
     Handle<String> ascii_sym =
-        FACTORY->LookupSymbol(Vector<const char>(ascii, length));
+        FACTORY->LookupOneByteSymbol(Vector<const char>(ascii, length));
     CHECK_EQ(length, ascii_sym->length());
     Handle<String> non_ascii_str =
         FACTORY->NewStringFromUtf8(Vector<const char>(non_ascii, 3 * length));
@@ -970,7 +970,7 @@
                        "  var z = x + y;"
                        "};"
                        "foo()";
-  Handle<String> foo_name = FACTORY->LookupAsciiSymbol("foo");
+  Handle<String> foo_name = FACTORY->LookupUtf8Symbol("foo");
 
   // This compile will add the code to the compilation cache.
   { v8::HandleScope scope;
@@ -1017,7 +1017,7 @@
                        "  var z = x + y;"
                        "};"
                        "foo()";
-  Handle<String> foo_name = FACTORY->LookupAsciiSymbol("foo");
+  Handle<String> foo_name = FACTORY->LookupUtf8Symbol("foo");
 
   // This compile will add the code to the compilation cache.
   { v8::HandleScope scope;
@@ -1087,8 +1087,8 @@
                        "  var x = 23;"
                        "};"
                        "bar();";
-  Handle<String> foo_name = FACTORY->LookupAsciiSymbol("foo");
-  Handle<String> bar_name = FACTORY->LookupAsciiSymbol("bar");
+  Handle<String> foo_name = FACTORY->LookupUtf8Symbol("foo");
+  Handle<String> bar_name = FACTORY->LookupUtf8Symbol("bar");
 
   // Perform one initial GC to enable code flushing.
   HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
@@ -1149,7 +1149,7 @@
                        "  var z = x + y;"
                        "};"
                        "foo()";
-  Handle<String> foo_name = FACTORY->LookupAsciiSymbol("foo");
+  Handle<String> foo_name = FACTORY->LookupUtf8Symbol("foo");
 
   // This compile will add the code to the compilation cache.
   { v8::HandleScope scope;
@@ -2429,11 +2429,7 @@
     CHECK(!resource->IsDisposed());
   }
   HEAP->CollectAllAvailableGarbage();
-  // External source is being retained by the stack trace.
-  CHECK(!resource->IsDisposed());
 
-  CompileRun("error.stack;");
-  HEAP->CollectAllAvailableGarbage();
   // External source has been released.
   CHECK(resource->IsDisposed());
   delete resource;
@@ -2497,7 +2493,7 @@
   // Fourth is the tricky part. Make sure the code containing the CallIC is
   // visited first without clearing the IC. The shared function info is then
   // visited later, causing the CallIC to be cleared.
-  Handle<String> name = FACTORY->LookupAsciiSymbol("call");
+  Handle<String> name = FACTORY->LookupUtf8Symbol("call");
   Handle<GlobalObject> global(ISOLATE->context()->global_object());
   MaybeObject* maybe_call = global->GetProperty(*name);
   JSFunction* call = JSFunction::cast(maybe_call->ToObjectChecked());
@@ -2575,3 +2571,48 @@
   // Unoptimized code is missing and the deoptimizer will go ballistic.
   CompileRun("g('bozo');");
 }
+
+
+TEST(Regress165495) {
+  i::FLAG_allow_natives_syntax = true;
+  i::FLAG_flush_code_incrementally = true;
+  InitializeVM();
+  v8::HandleScope scope;
+
+  // Perform one initial GC to enable code flushing.
+  HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+
+  // Prepare an optimized closure so that the optimized code map will get
+  // populated. Then age the unoptimized code to trigger code flushing,
+  // but make sure the optimized code is unreachable.
+  {
+    HandleScope inner_scope;
+    CompileRun("function mkClosure() {"
+               "  return function(x) { return x + 1; };"
+               "}"
+               "var f = mkClosure();"
+               "f(1); f(2);"
+               "%OptimizeFunctionOnNextCall(f); f(3);");
+
+    Handle<JSFunction> f =
+        v8::Utils::OpenHandle(
+            *v8::Handle<v8::Function>::Cast(
+                v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+    CHECK(f->is_compiled());
+    const int kAgingThreshold = 6;
+    for (int i = 0; i < kAgingThreshold; i++) {
+      f->shared()->code()->MakeOlder(static_cast<MarkingParity>(i % 2));
+    }
+
+    CompileRun("f = null;");
+  }
+
+  // Simulate incremental marking so that unoptimized code is flushed
+  // even though it is still cached in the optimized code map.
+  SimulateIncrementalMarking();
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+
+  // Make a new closure that will get code installed from the code map.
+  // Unoptimized code is missing and the deoptimizer will go ballistic.
+  CompileRun("var g = mkClosure(); g('bozo');");
+}
diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc
index 69abd8d..087db2f 100644
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -183,7 +183,7 @@
 
   // allocate a garbage
   String* func_name =
-      String::cast(HEAP->LookupAsciiSymbol("theFunction")->ToObjectChecked());
+      String::cast(HEAP->LookupUtf8Symbol("theFunction")->ToObjectChecked());
   SharedFunctionInfo* function_share = SharedFunctionInfo::cast(
       HEAP->AllocateSharedFunctionInfo(func_name)->ToObjectChecked());
   JSFunction* function = JSFunction::cast(
@@ -202,7 +202,7 @@
   HEAP->CollectGarbage(OLD_POINTER_SPACE);
 
   func_name =
-      String::cast(HEAP->LookupAsciiSymbol("theFunction")->ToObjectChecked());
+      String::cast(HEAP->LookupUtf8Symbol("theFunction")->ToObjectChecked());
   CHECK(Isolate::Current()->context()->global_object()->
         HasLocalProperty(func_name));
   Object* func_value = Isolate::Current()->context()->global_object()->
@@ -212,11 +212,11 @@
 
   obj = JSObject::cast(HEAP->AllocateJSObject(function)->ToObjectChecked());
   String* obj_name =
-      String::cast(HEAP->LookupAsciiSymbol("theObject")->ToObjectChecked());
+      String::cast(HEAP->LookupUtf8Symbol("theObject")->ToObjectChecked());
   Isolate::Current()->context()->global_object()->SetProperty(
       obj_name, obj, NONE, kNonStrictMode)->ToObjectChecked();
   String* prop_name =
-      String::cast(HEAP->LookupAsciiSymbol("theSlot")->ToObjectChecked());
+      String::cast(HEAP->LookupUtf8Symbol("theSlot")->ToObjectChecked());
   obj->SetProperty(prop_name,
                    Smi::FromInt(23),
                    NONE,
@@ -225,7 +225,7 @@
   HEAP->CollectGarbage(OLD_POINTER_SPACE);
 
   obj_name =
-      String::cast(HEAP->LookupAsciiSymbol("theObject")->ToObjectChecked());
+      String::cast(HEAP->LookupUtf8Symbol("theObject")->ToObjectChecked());
   CHECK(Isolate::Current()->context()->global_object()->
         HasLocalProperty(obj_name));
   CHECK(Isolate::Current()->context()->global_object()->
@@ -233,7 +233,7 @@
   obj = JSObject::cast(Isolate::Current()->context()->global_object()->
                        GetProperty(obj_name)->ToObjectChecked());
   prop_name =
-      String::cast(HEAP->LookupAsciiSymbol("theSlot")->ToObjectChecked());
+      String::cast(HEAP->LookupUtf8Symbol("theSlot")->ToObjectChecked());
   CHECK(obj->GetProperty(prop_name) == Smi::FromInt(23));
 }
 
@@ -546,9 +546,9 @@
       }
     } else {
       if (v8::internal::Snapshot::IsEnabled()) {
-        CHECK_LE(delta, 2500 * 1024);  // 2400.
+        CHECK_LE(delta, 2600 * 1024);  // 2400.
       } else {
-        CHECK_LE(delta, 2860 * 1024);  // 2760.
+        CHECK_LE(delta, 3000 * 1024);  // 2920.
       }
     }
   }
diff --git a/test/cctest/test-object-observe.cc b/test/cctest/test-object-observe.cc
index 25e5557..abd1011 100644
--- a/test/cctest/test-object-observe.cc
+++ b/test/cctest/test-object-observe.cc
@@ -278,3 +278,77 @@
   }
   CHECK_EQ(3, CompileRun("records.length")->Int32Value());
 }
+
+struct RecordExpectation {
+  Handle<Value> object;
+  const char* type;
+  const char* name;
+  Handle<Value> old_value;
+};
+
+// TODO(adamk): Use this helper elsewhere in this file.
+static void ExpectRecords(Handle<Value> records,
+                          const RecordExpectation expectations[],
+                          int num) {
+  CHECK(records->IsArray());
+  Handle<Array> recordArray = records.As<Array>();
+  CHECK_EQ(num, static_cast<int>(recordArray->Length()));
+  for (int i = 0; i < num; ++i) {
+    Handle<Value> record = recordArray->Get(i);
+    CHECK(record->IsObject());
+    Handle<Object> recordObj = record.As<Object>();
+    CHECK(expectations[i].object->StrictEquals(
+        recordObj->Get(String::New("object"))));
+    CHECK(String::New(expectations[i].type)->Equals(
+        recordObj->Get(String::New("type"))));
+    CHECK(String::New(expectations[i].name)->Equals(
+        recordObj->Get(String::New("name"))));
+    if (!expectations[i].old_value.IsEmpty()) {
+      CHECK(expectations[i].old_value->Equals(
+          recordObj->Get(String::New("oldValue"))));
+    }
+  }
+}
+
+TEST(APITestBasicMutation) {
+  HarmonyIsolate isolate;
+  HandleScope scope;
+  LocalContext context;
+  Handle<Object> obj = Handle<Object>::Cast(CompileRun(
+      "var records = [];"
+      "var obj = {};"
+      "function observer(r) { [].push.apply(records, r); };"
+      "Object.observe(obj, observer);"
+      "obj"));
+  obj->Set(String::New("foo"), Number::New(1));
+  obj->Set(1, Number::New(2));
+  // ForceSet should work just as well as Set
+  obj->ForceSet(String::New("foo"), Number::New(3));
+  obj->ForceSet(Number::New(1), Number::New(4));
+  // Setting an indexed element via the property setting method
+  obj->Set(Number::New(1), Number::New(5));
+  // Setting with a non-String, non-uint32 key
+  obj->Set(Number::New(1.1), Number::New(6), DontDelete);
+  obj->Delete(String::New("foo"));
+  obj->Delete(1);
+  obj->ForceDelete(Number::New(1.1));
+
+  // Force delivery
+  // TODO(adamk): Should the above set methods trigger delivery themselves?
+  CompileRun("void 0");
+  CHECK_EQ(9, CompileRun("records.length")->Int32Value());
+  const RecordExpectation expected_records[] = {
+    { obj, "new", "foo", Handle<Value>() },
+    { obj, "new", "1", Handle<Value>() },
+    { obj, "updated", "foo", Number::New(1) },
+    { obj, "updated", "1", Number::New(2) },
+    { obj, "updated", "1", Number::New(4) },
+    { obj, "new", "1.1", Handle<Value>() },
+    { obj, "deleted", "foo", Number::New(3) },
+    { obj, "deleted", "1", Number::New(5) },
+    { obj, "deleted", "1.1", Number::New(6) }
+  };
+  ExpectRecords(CompileRun("records"),
+                expected_records,
+                ARRAY_SIZE(expected_records));
+}
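
For illustration, a sketch of how the ExpectRecords helper above could be
reused for a single mutation, in the same cctest style; the test name and the
property name "bar" are hypothetical, not part of this change:

    TEST(APITestSingleMutationSketch) {
      HarmonyIsolate isolate;
      HandleScope scope;
      LocalContext context;
      Handle<Object> obj = Handle<Object>::Cast(CompileRun(
          "var records = [];"
          "var obj = {};"
          "Object.observe(obj, function(r) { [].push.apply(records, r); });"
          "obj"));
      obj->Set(String::New("bar"), Number::New(7));  // One observed write.
      CompileRun("void 0");  // Force delivery, as above.
      const RecordExpectation expected_records[] = {
        { obj, "new", "bar", Handle<Value>() }  // No oldValue for "new".
      };
      ExpectRecords(CompileRun("records"),
                    expected_records,
                    ARRAY_SIZE(expected_records));
    }
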
diff --git a/test/cctest/test-random.cc b/test/cctest/test-random.cc
index 86d6d8c..55879bf 100644
--- a/test/cctest/test-random.cc
+++ b/test/cctest/test-random.cc
@@ -83,7 +83,8 @@
 
   CompileRun("function f() { return Math.random(); }");
 
-  Object* symbol = FACTORY->LookupAsciiSymbol("f")->ToObjectChecked();
+  Object* symbol = FACTORY->LookupOneByteSymbol(STATIC_ASCII_VECTOR("f"))->
+      ToObjectChecked();
   MaybeObject* fun_object =
       context->global_object()->GetProperty(String::cast(symbol));
   Handle<JSFunction> fun(JSFunction::cast(fun_object->ToObjectChecked()));
diff --git a/test/cctest/test-regexp.cc b/test/cctest/test-regexp.cc
index 726108d..ac605a8 100644
--- a/test/cctest/test-regexp.cc
+++ b/test/cctest/test-regexp.cc
@@ -98,7 +98,6 @@
 static bool CheckSimple(const char* input) {
   V8::Initialize(NULL);
   v8::HandleScope scope;
-  unibrow::Utf8InputBuffer<> buffer(input, StrLength(input));
   ZoneScope zone_scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
   FlatStringReader reader(Isolate::Current(), CStrVector(input));
   RegExpCompileData result;
@@ -117,7 +116,6 @@
 static MinMaxPair CheckMinMaxMatch(const char* input) {
   V8::Initialize(NULL);
   v8::HandleScope scope;
-  unibrow::Utf8InputBuffer<> buffer(input, StrLength(input));
   ZoneScope zone_scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
   FlatStringReader reader(Isolate::Current(), CStrVector(input));
   RegExpCompileData result;
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index 8279182..71c1e95 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -291,7 +291,8 @@
   CHECK(Isolate::Current()->global_object()->IsJSObject());
   CHECK(Isolate::Current()->native_context()->IsContext());
   CHECK(HEAP->symbol_table()->IsSymbolTable());
-  CHECK(!FACTORY->LookupAsciiSymbol("Empty")->IsFailure());
+  CHECK(!FACTORY->LookupOneByteSymbol(
+      STATIC_ASCII_VECTOR("Empty"))->IsFailure());
 }
 
 
diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc
index 5a9ccbb..652a60a 100644
--- a/test/cctest/test-strings.cc
+++ b/test/cctest/test-strings.cc
@@ -15,16 +15,57 @@
 #include "cctest.h"
 #include "zone-inl.h"
 
-unsigned int seed = 123;
+// Adapted from http://en.wikipedia.org/wiki/Multiply-with-carry
+class RandomNumberGenerator {
+ public:
+  RandomNumberGenerator() {
+    init();
+  }
 
-static uint32_t gen() {
-        uint64_t z;
-        z = seed;
-        z *= 279470273;
-        z %= 4294967291U;
-        seed = static_cast<unsigned int>(z);
-        return static_cast<uint32_t>(seed >> 16);
-}
+  void init(uint32_t seed = 0x5688c73e) {
+    static const uint32_t phi = 0x9e3779b9;
+    c = 362436;
+    i = kQSize - 1;
+    Q[0] = seed;
+    Q[1] = seed + phi;
+    Q[2] = seed + phi + phi;
+    for (unsigned j = 3; j < kQSize; j++) {
+      Q[j] = Q[j - 3] ^ Q[j - 2] ^ phi ^ j;
+    }
+  }
+
+  uint32_t next() {
+    uint64_t a = 18782;
+    uint32_t r = 0xfffffffe;
+    i = (i + 1) & (kQSize - 1);
+    uint64_t t = a * Q[i] + c;
+    c = (t >> 32);
+    uint32_t x = static_cast<uint32_t>(t + c);
+    if (x < c) {
+      x++;
+      c++;
+    }
+    return (Q[i] = r - x);
+  }
+
+  uint32_t next(int max) {
+    return next() % max;
+  }
+
+  bool next(double threshold) {
+    ASSERT(threshold >= 0.0 && threshold <= 1.0);
+    if (threshold == 1.0) return true;
+    if (threshold == 0.0) return false;
+    uint32_t value = next() % 100000;
+    return threshold > static_cast<double>(value)/100000.0;
+  }
+
+ private:
+  static const uint32_t kQSize = 4096;
+  uint32_t Q[kQSize];
+  uint32_t c;
+  uint32_t i;
+};
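
The class above transcribes the multiply-with-carry recurrence it cites. As a
standalone sketch, assuming only the C++ standard library, the same step can
be exercised outside the test harness (constants and update order mirror the
class; names here are illustrative):

    #include <cstdint>
    #include <cstdio>

    static const unsigned kQSize = 4096;
    static uint32_t Q[kQSize];
    static uint32_t c = 362436;
    static unsigned idx = kQSize - 1;

    // Seed the lag-4096 state the same way init() does above.
    static void InitState(uint32_t seed) {
      const uint32_t phi = 0x9e3779b9;
      Q[0] = seed;
      Q[1] = seed + phi;
      Q[2] = seed + phi + phi;
      for (unsigned j = 3; j < kQSize; j++) {
        Q[j] = Q[j - 3] ^ Q[j - 2] ^ phi ^ j;
      }
    }

    // One multiply-with-carry step: t = 18782 * Q[idx] + c splits into a
    // new carry (high word) and a new state/output value (low word).
    static uint32_t Next() {
      idx = (idx + 1) & (kQSize - 1);
      uint64_t t = 18782ULL * Q[idx] + c;
      c = static_cast<uint32_t>(t >> 32);
      uint32_t x = static_cast<uint32_t>(t + c);
      if (x < c) { x++; c++; }
      return Q[idx] = 0xfffffffe - x;
    }

    int main() {
      InitState(0x5688c73e);
      for (int n = 0; n < 4; n++) printf("%u\n", Next());
      return 0;
    }
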
 
 
 using namespace v8::internal;
@@ -44,7 +85,6 @@
 }
 
 
-static const int NUMBER_OF_BUILDING_BLOCKS = 128;
 static const int DEEP_DEPTH = 8 * 1024;
 static const int SUPER_DEEP_DEPTH = 80 * 1024;
 
@@ -79,21 +119,42 @@
 };
 
 
-static void InitializeBuildingBlocks(
-    Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS]) {
+static void InitializeBuildingBlocks(Handle<String>* building_blocks,
+                                     int bb_length,
+                                     bool long_blocks,
+                                     RandomNumberGenerator* rng) {
   // A list of pointers that we don't have any interest in cleaning up.
   // If they are reachable from a root then leak detection won't complain.
   Zone* zone = Isolate::Current()->runtime_zone();
-  for (int i = 0; i < NUMBER_OF_BUILDING_BLOCKS; i++) {
-    int len = gen() % 16;
-    if (len > 14) {
+  for (int i = 0; i < bb_length; i++) {
+    int len = rng->next(16);
+    int slice_head_chars = 0;
+    int slice_tail_chars = 0;
+    int slice_depth = 0;
+    for (int j = 0; j < 3; j++) {
+      if (rng->next(0.35)) slice_depth++;
+    }
+    // Must truncate something for a slice string. Loop until
+    // at least one end is sliced.
+    while (slice_head_chars == 0 && slice_tail_chars == 0) {
+      slice_head_chars = rng->next(15);
+      slice_tail_chars = rng->next(12);
+    }
+    if (long_blocks) {
+      // Generate building blocks which will never be merged
+      len += ConsString::kMinLength + 1;
+    } else if (len > 14) {
       len += 1234;
     }
-    switch (gen() % 4) {
+    // Don't slice 0 length strings.
+    if (len == 0) slice_depth = 0;
+    int slice_length = slice_depth * (slice_head_chars + slice_tail_chars);
+    len += slice_length;
+    switch (rng->next(4)) {
       case 0: {
         uc16 buf[2000];
         for (int j = 0; j < len; j++) {
-          buf[j] = gen() % 65536;
+          buf[j] = rng->next(0x10000);
         }
         building_blocks[i] =
             FACTORY->NewStringFromTwoByte(Vector<const uc16>(buf, len));
@@ -105,7 +166,7 @@
       case 1: {
         char buf[2000];
         for (int j = 0; j < len; j++) {
-          buf[j] = gen() % 128;
+          buf[j] = rng->next(0x80);
         }
         building_blocks[i] =
             FACTORY->NewStringFromAscii(Vector<const char>(buf, len));
@@ -117,7 +178,7 @@
       case 2: {
         uc16* buf = zone->NewArray<uc16>(len);
         for (int j = 0; j < len; j++) {
-          buf[j] = gen() % 65536;
+          buf[j] = rng->next(0x10000);
         }
         Resource* resource = new(zone) Resource(Vector<const uc16>(buf, len));
         building_blocks[i] = FACTORY->NewExternalStringFromTwoByte(resource);
@@ -127,89 +188,354 @@
         break;
       }
       case 3: {
-        char* buf = NewArray<char>(len);
+        char* buf = zone->NewArray<char>(len);
         for (int j = 0; j < len; j++) {
-          buf[j] = gen() % 128;
+          buf[j] = rng->next(0x80);
         }
-        building_blocks[i] =
-            FACTORY->NewStringFromAscii(Vector<const char>(buf, len));
+        AsciiResource* resource =
+            new(zone) AsciiResource(Vector<const char>(buf, len));
+        building_blocks[i] = FACTORY->NewExternalStringFromAscii(resource);
         for (int j = 0; j < len; j++) {
           CHECK_EQ(buf[j], building_blocks[i]->Get(j));
         }
-        DeleteArray<char>(buf);
         break;
       }
     }
+    for (int j = slice_depth; j > 0; j--) {
+      building_blocks[i] = FACTORY->NewSubString(
+          building_blocks[i],
+          slice_head_chars,
+          building_blocks[i]->length() - slice_tail_chars);
+    }
+    CHECK(len == building_blocks[i]->length() + slice_length);
   }
 }
 
 
+class ConsStringStats {
+ public:
+  ConsStringStats() {
+    Reset();
+  }
+  void Reset();
+  void VerifyEqual(const ConsStringStats& that) const;
+  unsigned leaves_;
+  unsigned empty_leaves_;
+  unsigned chars_;
+  unsigned left_traversals_;
+  unsigned right_traversals_;
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ConsStringStats);
+};
+
+
+void ConsStringStats::Reset() {
+  leaves_ = 0;
+  empty_leaves_ = 0;
+  chars_ = 0;
+  left_traversals_ = 0;
+  right_traversals_ = 0;
+}
+
+
+void ConsStringStats::VerifyEqual(const ConsStringStats& that) const {
+  CHECK(this->leaves_ == that.leaves_);
+  CHECK(this->empty_leaves_ == that.empty_leaves_);
+  CHECK(this->chars_ == that.chars_);
+  CHECK(this->left_traversals_ == that.left_traversals_);
+  CHECK(this->right_traversals_ == that.right_traversals_);
+}
+
+
+class ConsStringGenerationData {
+ public:
+  static const int kNumberOfBuildingBlocks = 256;
+  explicit ConsStringGenerationData(bool long_blocks);
+  void Reset();
+  inline Handle<String> block(int offset);
+  inline Handle<String> block(uint32_t offset);
+  // Input variables.
+  double early_termination_threshold_;
+  double leftness_;
+  double rightness_;
+  double empty_leaf_threshold_;
+  unsigned max_leaves_;
+  // Cached data.
+  Handle<String> building_blocks_[kNumberOfBuildingBlocks];
+  String* empty_string_;
+  RandomNumberGenerator rng_;
+  // Stats.
+  ConsStringStats stats_;
+  unsigned early_terminations_;
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ConsStringGenerationData);
+};
+
+
+ConsStringGenerationData::ConsStringGenerationData(bool long_blocks) {
+  rng_.init();
+  InitializeBuildingBlocks(
+      building_blocks_, kNumberOfBuildingBlocks, long_blocks, &rng_);
+  empty_string_ = Isolate::Current()->heap()->empty_string();
+  Reset();
+}
+
+
+Handle<String> ConsStringGenerationData::block(uint32_t offset) {
+  return building_blocks_[offset % kNumberOfBuildingBlocks];
+}
+
+
+Handle<String> ConsStringGenerationData::block(int offset) {
+  CHECK_GE(offset, 0);
+  return building_blocks_[offset % kNumberOfBuildingBlocks];
+}
+
+
+void ConsStringGenerationData::Reset() {
+  early_termination_threshold_ = 0.01;
+  leftness_ = 0.75;
+  rightness_ = 0.75;
+  empty_leaf_threshold_ = 0.02;
+  max_leaves_ = 1000;
+  stats_.Reset();
+  early_terminations_ = 0;
+  rng_.init();
+}
+
+
+void AccumulateStats(ConsString* cons_string, ConsStringStats* stats) {
+  int left_length = cons_string->first()->length();
+  int right_length = cons_string->second()->length();
+  CHECK(cons_string->length() == left_length + right_length);
+  // Check left side.
+  bool left_is_cons = cons_string->first()->IsConsString();
+  if (left_is_cons) {
+    stats->left_traversals_++;
+    AccumulateStats(ConsString::cast(cons_string->first()), stats);
+  } else {
+    CHECK_NE(left_length, 0);
+    stats->leaves_++;
+    stats->chars_ += left_length;
+  }
+  // Check right side.
+  if (cons_string->second()->IsConsString()) {
+    stats->right_traversals_++;
+    AccumulateStats(ConsString::cast(cons_string->second()), stats);
+  } else {
+    if (right_length == 0) {
+      stats->empty_leaves_++;
+      CHECK(!left_is_cons);
+    }
+    stats->leaves_++;
+    stats->chars_ += right_length;
+  }
+}
+
+
+void AccumulateStats(Handle<String> cons_string, ConsStringStats* stats) {
+  AssertNoAllocation no_alloc;
+  if (cons_string->IsConsString()) {
+    return AccumulateStats(ConsString::cast(*cons_string), stats);
+  }
+  // This string got flattened by gc.
+  stats->chars_ += cons_string->length();
+}
+
+
+void AccumulateStatsWithOperator(
+    ConsString* cons_string, ConsStringStats* stats) {
+  unsigned offset = 0;
+  int32_t type = cons_string->map()->instance_type();
+  unsigned length = static_cast<unsigned>(cons_string->length());
+  ConsStringIteratorOp op;
+  String* string = op.Operate(cons_string, &offset, &type, &length);
+  CHECK(string != NULL);
+  while (true) {
+    ASSERT(!string->IsConsString());
+    // Accumulate stats.
+    stats->leaves_++;
+    stats->chars_ += string->length();
+    // Check for completion.
+    bool keep_going_fast_check = op.HasMore();
+    string = op.ContinueOperation(&type, &length);
+    if (string == NULL) return;
+    // Verify no false positives for fast check.
+    CHECK(keep_going_fast_check);
+  }
+}
+
+
+void VerifyConsString(Handle<String> root, ConsStringGenerationData* data) {
+  // Verify basic data.
+  CHECK(root->IsConsString());
+  CHECK(static_cast<unsigned>(root->length()) == data->stats_.chars_);
+  // Recursive verify.
+  ConsStringStats stats;
+  AccumulateStats(ConsString::cast(*root), &stats);
+  stats.VerifyEqual(data->stats_);
+  // Iteratively verify.
+  stats.Reset();
+  AccumulateStatsWithOperator(ConsString::cast(*root), &stats);
+  // The iterator does not visit these, so copy them over.
+  stats.empty_leaves_ = data->stats_.empty_leaves_;
+  stats.left_traversals_ = data->stats_.left_traversals_;
+  stats.right_traversals_ = data->stats_.right_traversals_;
+  // Adjust total leaves to compensate.
+  stats.leaves_ += stats.empty_leaves_;
+  stats.VerifyEqual(data->stats_);
+}
+
+
+static Handle<String> ConstructRandomString(ConsStringGenerationData* data,
+                                            unsigned max_recursion) {
+  // Compute termination characteristics.
+  bool terminate = false;
+  bool flat = data->rng_.next(data->empty_leaf_threshold_);
+  bool terminate_early = data->rng_.next(data->early_termination_threshold_);
+  if (terminate_early) data->early_terminations_++;
+  // The obvious condition.
+  terminate |= max_recursion == 0;
+  // Flat cons strings terminate by definition.
+  terminate |= flat;
+  // Cap for max leaves.
+  terminate |= data->stats_.leaves_ >= data->max_leaves_;
+  // Roll the dice.
+  terminate |= terminate_early;
+  // Compute termination characteristics for each side.
+  bool terminate_left = terminate || !data->rng_.next(data->leftness_);
+  bool terminate_right = terminate || !data->rng_.next(data->rightness_);
+  // Generate left string.
+  Handle<String> left;
+  if (terminate_left) {
+    left = data->block(data->rng_.next());
+    data->stats_.leaves_++;
+    data->stats_.chars_ += left->length();
+  } else {
+    data->stats_.left_traversals_++;
+  }
+  // Generate right string.
+  Handle<String> right;
+  if (terminate_right) {
+    right = data->block(data->rng_.next());
+    data->stats_.leaves_++;
+    data->stats_.chars_ += right->length();
+  } else {
+    data->stats_.right_traversals_++;
+  }
+  // Generate the necessary sub-nodes recursively.
+  if (!terminate_right) {
+    // Need to balance generation fairly.
+    if (!terminate_left && data->rng_.next(0.5)) {
+      left = ConstructRandomString(data, max_recursion - 1);
+    }
+    right = ConstructRandomString(data, max_recursion - 1);
+  }
+  if (!terminate_left && left.is_null()) {
+    left = ConstructRandomString(data, max_recursion - 1);
+  }
+  // Build the cons string.
+  Handle<String> root = FACTORY->NewConsString(left, right);
+  CHECK(root->IsConsString() && !root->IsFlat());
+  // Special work needed for flat string.
+  if (flat) {
+    data->stats_.empty_leaves_++;
+    FlattenString(root);
+    CHECK(root->IsConsString() && root->IsFlat());
+  }
+  return root;
+}
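
A sketch of driving the generator above in isolation, assuming an initialized
VM as in the TEST() bodies below; the function name is hypothetical, and
PrintStats is only defined further down in this file, so declaration order is
glossed over:

    static void RandomConsStringSmokeTestSketch() {
      v8::HandleScope scope;
      ConsStringGenerationData data(true);  // Long building blocks.
      Handle<String> root = ConstructRandomString(&data, 200);
      PrintStats(data);               // DEBUG-only dump of the stats above.
      VerifyConsString(root, &data);  // Recursive and iterative checks.
    }
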
+
+
 static Handle<String> ConstructLeft(
-    Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS],
+    ConsStringGenerationData* data,
     int depth) {
   Handle<String> answer = FACTORY->NewStringFromAscii(CStrVector(""));
+  data->stats_.leaves_++;
   for (int i = 0; i < depth; i++) {
-    answer = FACTORY->NewConsString(
-        answer,
-        building_blocks[i % NUMBER_OF_BUILDING_BLOCKS]);
+    Handle<String> block = data->block(i);
+    Handle<String> next = FACTORY->NewConsString(answer, block);
+    if (next->IsConsString()) data->stats_.leaves_++;
+    data->stats_.chars_ += block->length();
+    answer = next;
   }
+  data->stats_.left_traversals_ = data->stats_.leaves_ - 2;
   return answer;
 }
 
 
 static Handle<String> ConstructRight(
-    Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS],
+    ConsStringGenerationData* data,
     int depth) {
   Handle<String> answer = FACTORY->NewStringFromAscii(CStrVector(""));
+  data->stats_.leaves_++;
   for (int i = depth - 1; i >= 0; i--) {
-    answer = FACTORY->NewConsString(
-        building_blocks[i % NUMBER_OF_BUILDING_BLOCKS],
-        answer);
+    Handle<String> block = data->block(i);
+    Handle<String> next = FACTORY->NewConsString(block, answer);
+    if (next->IsConsString()) data->stats_.leaves_++;
+    data->stats_.chars_ += block->length();
+    answer = next;
   }
+  data->stats_.right_traversals_ = data->stats_.leaves_ - 2;
   return answer;
 }
 
 
 static Handle<String> ConstructBalancedHelper(
-    Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS],
+    ConsStringGenerationData* data,
     int from,
     int to) {
   CHECK(to > from);
   if (to - from == 1) {
-    return building_blocks[from % NUMBER_OF_BUILDING_BLOCKS];
+    data->stats_.chars_ += data->block(from)->length();
+    return data->block(from);
   }
   if (to - from == 2) {
-    return FACTORY->NewConsString(
-        building_blocks[from % NUMBER_OF_BUILDING_BLOCKS],
-        building_blocks[(from+1) % NUMBER_OF_BUILDING_BLOCKS]);
+    data->stats_.chars_ += data->block(from)->length();
+    data->stats_.chars_ += data->block(from+1)->length();
+    return FACTORY->NewConsString(data->block(from), data->block(from+1));
   }
   Handle<String> part1 =
-    ConstructBalancedHelper(building_blocks, from, from + ((to - from) / 2));
+    ConstructBalancedHelper(data, from, from + ((to - from) / 2));
   Handle<String> part2 =
-    ConstructBalancedHelper(building_blocks, from + ((to - from) / 2), to);
+    ConstructBalancedHelper(data, from + ((to - from) / 2), to);
+  if (part1->IsConsString()) data->stats_.left_traversals_++;
+  if (part2->IsConsString()) data->stats_.right_traversals_++;
   return FACTORY->NewConsString(part1, part2);
 }
 
 
 static Handle<String> ConstructBalanced(
-    Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS]) {
-  return ConstructBalancedHelper(building_blocks, 0, DEEP_DEPTH);
+    ConsStringGenerationData* data, int depth = DEEP_DEPTH) {
+  Handle<String> string = ConstructBalancedHelper(data, 0, depth);
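+  // Each non-root cons node was counted as exactly one left or right
+  // traversal, and a strict binary tree with N internal nodes has N + 1
+  // leaves, hence traversals + 2.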
+  data->stats_.leaves_ =
+      data->stats_.left_traversals_ + data->stats_.right_traversals_ + 2;
+  return string;
 }
 
 
 static StringInputBuffer buffer;
-
+static ConsStringIteratorOp cons_string_iterator_op_1;
+static ConsStringIteratorOp cons_string_iterator_op_2;
 
 static void Traverse(Handle<String> s1, Handle<String> s2) {
   int i = 0;
   buffer.Reset(*s1);
+  StringCharacterStream character_stream_1(*s1, 0, &cons_string_iterator_op_1);
+  StringCharacterStream character_stream_2(*s2, 0, &cons_string_iterator_op_2);
   StringInputBuffer buffer2(*s2);
   while (buffer.has_more()) {
     CHECK(buffer2.has_more());
+    CHECK(character_stream_1.HasMore());
+    CHECK(character_stream_2.HasMore());
     uint16_t c = buffer.GetNext();
     CHECK_EQ(c, buffer2.GetNext());
+    CHECK_EQ(c, character_stream_1.GetNext());
+    CHECK_EQ(c, character_stream_2.GetNext());
     i++;
   }
+  CHECK(!character_stream_1.HasMore());
+  CHECK(!character_stream_2.HasMore());
   CHECK_EQ(s1->length(), i);
   CHECK_EQ(s2->length(), i);
 }
@@ -219,10 +545,16 @@
   int i = 0;
   buffer.Reset(*s1);
   StringInputBuffer buffer2(*s2);
+  StringCharacterStream character_stream_1(*s1, 0, &cons_string_iterator_op_1);
+  StringCharacterStream character_stream_2(*s2, 0, &cons_string_iterator_op_2);
   while (buffer.has_more() && i < chars) {
     CHECK(buffer2.has_more());
+    CHECK(character_stream_1.HasMore());
+    CHECK(character_stream_2.HasMore());
     uint16_t c = buffer.GetNext();
     CHECK_EQ(c, buffer2.GetNext());
+    CHECK_EQ(c, character_stream_1.GetNext());
+    CHECK_EQ(c, character_stream_2.GetNext());
     i++;
   }
   s1->Get(s1->length() - 1);
@@ -234,14 +566,13 @@
   printf("TestTraverse\n");
   InitializeVM();
   v8::HandleScope scope;
-  Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS];
   ZoneScope zone(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
-  InitializeBuildingBlocks(building_blocks);
-  Handle<String> flat = ConstructBalanced(building_blocks);
+  ConsStringGenerationData data(false);
+  Handle<String> flat = ConstructBalanced(&data);
   FlattenString(flat);
-  Handle<String> left_asymmetric = ConstructLeft(building_blocks, DEEP_DEPTH);
-  Handle<String> right_asymmetric = ConstructRight(building_blocks, DEEP_DEPTH);
-  Handle<String> symmetric = ConstructBalanced(building_blocks);
+  Handle<String> left_asymmetric = ConstructLeft(&data, DEEP_DEPTH);
+  Handle<String> right_asymmetric = ConstructRight(&data, DEEP_DEPTH);
+  Handle<String> symmetric = ConstructBalanced(&data);
   printf("1\n");
   Traverse(flat, symmetric);
   printf("2\n");
@@ -250,9 +581,9 @@
   Traverse(flat, right_asymmetric);
   printf("4\n");
   Handle<String> left_deep_asymmetric =
-      ConstructLeft(building_blocks, SUPER_DEEP_DEPTH);
+      ConstructLeft(&data, SUPER_DEEP_DEPTH);
   Handle<String> right_deep_asymmetric =
-      ConstructRight(building_blocks, SUPER_DEEP_DEPTH);
+      ConstructRight(&data, SUPER_DEEP_DEPTH);
   printf("5\n");
   TraverseFirst(left_asymmetric, left_deep_asymmetric, 1050);
   printf("6\n");
@@ -275,6 +606,248 @@
 }
 
 
+static void VerifyCharacterStream(
+    String* flat_string, String* cons_string) {
+  // We do not want to test ConsString traversal on a flat string.
+  CHECK(flat_string->IsFlat() && !flat_string->IsConsString());
+  CHECK(cons_string->IsConsString());
+  // TODO(dcarney): Test stream reset as well.
+  int length = flat_string->length();
+  // Iterate start search in multiple places in the string.
+  int outer_iterations = length > 20 ? 20 : length;
+  for (int j = 0; j <= outer_iterations; j++) {
+    int offset = length * j / outer_iterations;
+    if (offset < 0) offset = 0;
+    // Want to test the offset == length case.
+    if (offset > length) offset = length;
+    StringCharacterStream flat_stream(
+        flat_string, static_cast<unsigned>(offset),
+        &cons_string_iterator_op_1);
+    StringCharacterStream cons_stream(
+        cons_string, static_cast<unsigned>(offset),
+        &cons_string_iterator_op_2);
+    for (int i = offset; i < length; i++) {
+      uint16_t c = flat_string->Get(i);
+      CHECK(flat_stream.HasMore());
+      CHECK(cons_stream.HasMore());
+      CHECK_EQ(c, flat_stream.GetNext());
+      CHECK_EQ(c, cons_stream.GetNext());
+    }
+    CHECK(!flat_stream.HasMore());
+    CHECK(!cons_stream.HasMore());
+  }
+}
+
+
+static inline void PrintStats(const ConsStringGenerationData& data) {
+#ifdef DEBUG
+  printf(
+      "%s: [%u], %s: [%u], %s: [%u], %s: [%u], %s: [%u], %s: [%u]\n",
+      "leaves", data.stats_.leaves_,
+      "empty", data.stats_.empty_leaves_,
+      "chars", data.stats_.chars_,
+      "lefts", data.stats_.left_traversals_,
+      "rights", data.stats_.right_traversals_,
+      "early_terminations", data.early_terminations_);
+#endif
+}
+
+
+template<typename BuildString>
+void TestStringCharacterStream(BuildString build, int test_cases) {
+  InitializeVM();
+  Isolate* isolate = Isolate::Current();
+  HandleScope outer_scope(isolate);
+  ZoneScope zone(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
+  ConsStringGenerationData data(true);
+  for (int i = 0; i < test_cases; i++) {
+    printf("%d\n", i);
+    HandleScope inner_scope(isolate);
+    AlwaysAllocateScope always_allocate;
+    // Build flat version of cons string.
+    Handle<String> flat_string = build(i, &data);
+    ConsStringStats flat_string_stats;
+    AccumulateStats(flat_string, &flat_string_stats);
+    // Flatten string.
+    FlattenString(flat_string);
+    // Build unflattened version of cons string to test.
+    Handle<String> cons_string = build(i, &data);
+    ConsStringStats cons_string_stats;
+    AccumulateStats(cons_string, &cons_string_stats);
+    AssertNoAllocation no_alloc;
+    PrintStats(data);
+    // Full verify of cons string.
+    cons_string_stats.VerifyEqual(flat_string_stats);
+    cons_string_stats.VerifyEqual(data.stats_);
+    VerifyConsString(cons_string, &data);
+    String* flat_string_ptr =
+        flat_string->IsConsString() ?
+        ConsString::cast(*flat_string)->first() :
+        *flat_string;
+    VerifyCharacterStream(flat_string_ptr, *cons_string);
+  }
+}
+
+
+// Covers cases 0 through 8 in BuildEdgeCaseConsString below.
+static const int kCharacterStreamNonRandomCases = 9;
+
+
+static Handle<String> BuildEdgeCaseConsString(
+    int test_case, ConsStringGenerationData* data) {
+  data->Reset();
+  switch (test_case) {
+    case 0:
+      return ConstructBalanced(data, 71);
+    case 1:
+      return ConstructLeft(data, 71);
+    case 2:
+      return ConstructRight(data, 71);
+    case 3:
+      return ConstructLeft(data, 10);
+    case 4:
+      return ConstructRight(data, 10);
+    case 5:
+      // 2 element balanced tree.
+      data->stats_.chars_ += data->block(0)->length();
+      data->stats_.chars_ += data->block(1)->length();
+      data->stats_.leaves_ += 2;
+      return FACTORY->NewConsString(data->block(0), data->block(1));
+    case 6:
+      // Simple flattened tree.
+      data->stats_.chars_ += data->block(0)->length();
+      data->stats_.chars_ += data->block(1)->length();
+      data->stats_.leaves_ += 2;
+      data->stats_.empty_leaves_ += 1;
+      {
+        Handle<String> string =
+            FACTORY->NewConsString(data->block(0), data->block(1));
+        FlattenString(string);
+        return string;
+      }
+    case 7:
+      // Left node flattened.
+      data->stats_.chars_ += data->block(0)->length();
+      data->stats_.chars_ += data->block(1)->length();
+      data->stats_.chars_ += data->block(2)->length();
+      data->stats_.leaves_ += 3;
+      data->stats_.empty_leaves_ += 1;
+      data->stats_.left_traversals_ += 1;
+      {
+        Handle<String> left =
+            FACTORY->NewConsString(data->block(0), data->block(1));
+        FlattenString(left);
+        return FACTORY->NewConsString(left, data->block(2));
+      }
+    case 8:
+      // Left node and right node flattened.
+      data->stats_.chars_ += data->block(0)->length();
+      data->stats_.chars_ += data->block(1)->length();
+      data->stats_.chars_ += data->block(2)->length();
+      data->stats_.chars_ += data->block(3)->length();
+      data->stats_.leaves_ += 4;
+      data->stats_.empty_leaves_ += 2;
+      data->stats_.left_traversals_ += 1;
+      data->stats_.right_traversals_ += 1;
+      {
+        Handle<String> left =
+            FACTORY->NewConsString(data->block(0), data->block(1));
+        FlattenString(left);
+        Handle<String> right =
+            FACTORY->NewConsString(data->block(2), data->block(3));
+        FlattenString(right);
+        return FACTORY->NewConsString(left, right);
+      }
+  }
+  UNREACHABLE();
+  return Handle<String>();
+}
+
+
+TEST(StringCharacterStreamEdgeCases) {
+  printf("TestStringCharacterStreamEdgeCases\n");
+  TestStringCharacterStream(
+      BuildEdgeCaseConsString, kCharacterStreamNonRandomCases);
+}
+
+
+static const int kBalances = 3;
+static const int kTreeLengths = 4;
+static const int kEmptyLeaves = 4;
+static const int kUniqueRandomParameters =
+    kBalances * kTreeLengths * kEmptyLeaves;
+
+
+static void InitializeGenerationData(
+    int test_case, ConsStringGenerationData* data) {
+  // Clear the settings and reinit the rng.
+  data->Reset();
+  // Spin up the rng to a known location that is unique per test.
+  static const int kPerTestJump = 501;
+  for (int j = 0; j < test_case*kPerTestJump; j++) {
+    data->rng_.next();
+  }
+  // Choose balanced, left or right heavy trees.
+  switch (test_case % kBalances) {
+    case 0:
+      // Nothing to do.  Already balanced.
+      break;
+    case 1:
+      // Left heavy.
+      data->leftness_ = 0.90;
+      data->rightness_ = 0.15;
+      break;
+    case 2:
+      // Right heavy.
+      data->leftness_ = 0.15;
+      data->rightness_ = 0.90;
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  // Must remove the influence of the above decision.
+  test_case /= kBalances;
+  // Choose tree length.
+  switch (test_case % kTreeLengths) {
+    case 0:
+      data->max_leaves_ = 16;
+      data->early_termination_threshold_ = 0.2;
+      break;
+    case 1:
+      data->max_leaves_ = 50;
+      data->early_termination_threshold_ = 0.05;
+      break;
+    case 2:
+      data->max_leaves_ = 500;
+      data->early_termination_threshold_ = 0.03;
+      break;
+    case 3:
+      data->max_leaves_ = 5000;
+      data->early_termination_threshold_ = 0.001;
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  // Must remove the influence of the above decision.
+  test_case /= kTreeLengths;
+  // Choose how much we allow empty nodes, including not at all.
+  data->empty_leaf_threshold_ =
+      0.03 * static_cast<double>(test_case % kEmptyLeaves);
+}
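
To make the mixed-radix decoding above concrete, a short worked example using
the constants defined earlier; 29 is an arbitrary index below
kUniqueRandomParameters = 3 * 4 * 4 = 48:

    int test_case = 29;
    int balance = test_case % kBalances;         // 29 % 3 == 2: right heavy.
    test_case /= kBalances;                      // 9.
    int tree_length = test_case % kTreeLengths;  // 9 % 4 == 1: 50 leaves max.
    test_case /= kTreeLengths;                   // 2.
    int empties = test_case % kEmptyLeaves;      // 2: threshold 0.03 * 2.
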
+
+
+static Handle<String> BuildRandomConsString(
+    int test_case, ConsStringGenerationData* data) {
+  InitializeGenerationData(test_case, data);
+  return ConstructRandomString(data, 200);
+}
+
+
+TEST(StringCharacterStreamRandom) {
+  printf("StringCharacterStreamRandom\n");
+  TestStringCharacterStream(BuildRandomConsString, kUniqueRandomParameters * 7);
+}
+
+
 static const int DEEP_ASCII_DEPTH = 100000;
 
 
diff --git a/test/message/overwritten-builtins.out b/test/message/overwritten-builtins.out
index ccf2924..db31bbf 100644
--- a/test/message/overwritten-builtins.out
+++ b/test/message/overwritten-builtins.out
@@ -28,3 +28,6 @@
 *%(basename)s:31: TypeError: Cannot read property 'x' of undefined
 undefined.x
          ^
+TypeError: Cannot read property 'x' of undefined
+    at *%(basename)s:31:10
+
diff --git a/test/mjsunit/debug-set-variable-value.js b/test/mjsunit/debug-set-variable-value.js
index dac8861..4667a71 100644
--- a/test/mjsunit/debug-set-variable-value.js
+++ b/test/mjsunit/debug-set-variable-value.js
@@ -34,8 +34,10 @@
 // A variable 'variable_name' must be initialized before the debugger
 // statement and returned after it. The test alters the variable value at the
 // debugger statement and checks that the returned value reflects the change.
-function RunPauseTest(scope_number, variable_name, expected_new_value, fun) {
-  var old_value = fun();
+function RunPauseTest(scope_number, expected_old_result, variable_name,
+    new_value, expected_new_result, fun) {
+  var actual_old_result = fun();
+  assertEquals(expected_old_result, actual_old_result);
 
   var listener_delegate;
   var listener_called = false;
@@ -43,7 +45,7 @@
 
   function listener_delegate(exec_state) {
     var scope = exec_state.frame(0).scope(scope_number);
-    scope.setVariableValue(variable_name, expected_new_value);
+    scope.setVariableValue(variable_name, new_value);
   }
 
   function listener(event, exec_state, event_data, data) {
@@ -62,102 +64,233 @@
 
-  var actual_new_value;
+  var actual_new_result;
   try {
-    actual_new_value = fun();
+    actual_new_result = fun();
   } finally {
     Debug.setListener(null);
   }
 
   if (exception != null) {
-   assertUnreachable("Exception: " + exception);
+   assertUnreachable("Exception in listener\n" + exception.stack);
   }
   assertTrue(listener_called);
 
-  assertTrue(old_value != actual_new_value);
-  assertTrue(expected_new_value == actual_new_value);
+  assertEquals(expected_new_result, actual_new_result);
 }
 
 // Accepts a closure 'fun' that returns a variable from its outer scope.
 // The test changes the value of the variable via a mirror of the function
 // and checks that the return value changed accordingly.
-function RunClosureTest(scope_number, variable_name, expected_new_value, fun) {
-  var old_value = fun();
+function RunClosureTest(scope_number, expected_old_result, variable_name,
+    new_value, expected_new_result, fun) {
+  var actual_old_result = fun();
+  assertEquals(expected_old_result, actual_old_result);
 
   var fun_mirror = Debug.MakeMirror(fun);
 
   var scope = fun_mirror.scope(scope_number);
-  scope.setVariableValue(variable_name, expected_new_value);
+  scope.setVariableValue(variable_name, new_value);
 
-  var actual_new_value = fun();
+  var actual_new_result = fun();
 
-  assertTrue(old_value != actual_new_value);
-  assertTrue(expected_new_value == actual_new_value);
+  assertEquals(expected_new_result, actual_new_result);
 }
 
-// Test changing variable value when in pause
-RunPauseTest(1, 'v1', 5, (function Factory() {
-  var v1 = 'cat';
+
+function ClosureTestCase(scope_index, old_result, variable_name, new_value,
+    new_result, success_expected, factory) {
+  this.scope_index_ = scope_index;
+  this.old_result_ = old_result;
+  this.variable_name_ = variable_name;
+  this.new_value_ = new_value;
+  this.new_result_ = new_result;
+  this.success_expected_ = success_expected;
+  this.factory_ = factory;
+}
+
+ClosureTestCase.prototype.run_pause_test = function() {
+  var th = this;
+  var fun = this.factory_(true);
+  this.run_and_catch_(function() {
+    RunPauseTest(th.scope_index_ + 1, th.old_result_, th.variable_name_,
+        th.new_value_, th.new_result_, fun);
+  });
+};
+
+ClosureTestCase.prototype.run_closure_test = function() {
+  var th = this;
+  var fun = this.factory_(false);
+  this.run_and_catch_(function() {
+    RunClosureTest(th.scope_index_, th.old_result_, th.variable_name_,
+        th.new_value_, th.new_result_, fun);
+  });
+};
+
+ClosureTestCase.prototype.run_and_catch_ = function(runnable) {
+  if (this.success_expected_) {
+    runnable();
+  } else {
+    assertThrows(runnable);
+  }
+};
+
+
+// Test scopes visible from closures.
+
+var closure_test_cases = [
+  new ClosureTestCase(0, 'cat', 'v1', 5, 5, true,
+      function Factory(debug_stop) {
+    var v1 = 'cat';
+    return function() {
+      if (debug_stop) debugger;
+      return v1;
+    }
+  }),
+
+  new ClosureTestCase(0, 4, 't', 7, 9, true, function Factory(debug_stop) {
+    var t = 2;
+    var r = eval("t");
+    return function() {
+      if (debug_stop) debugger;
+      return r + t;
+    }
+  }),
+
+  new ClosureTestCase(0, 6, 't', 10, 13, true, function Factory(debug_stop) {
+    var t = 2;
+    var r = eval("t = 3");
+    return function() {
+      if (debug_stop) debugger;
+      return r + t;
+    }
+  }),
+
+  new ClosureTestCase(0, 17, 's', 'Bird', 'Bird', true,
+      function Factory(debug_stop) {
+    eval("var s = 17");
+    return function() {
+      if (debug_stop) debugger;
+      return s;
+    }
+  }),
+
+  new ClosureTestCase(2, 'capybara', 'foo', 77, 77, true,
+      function Factory(debug_stop) {
+    var foo = "capybara";
+    return (function() {
+      var bar = "fish";
+      try {
+        throw {name: "test exception"};
+      } catch (e) {
+        return function() {
+          if (debug_stop) debugger;
+          bar = "beast";
+          return foo;
+        }
+      }
+    })();
+  }),
+
+  new ClosureTestCase(0, 'AlphaBeta', 'eee', 5, '5Beta', true,
+      function Factory(debug_stop) {
+    var foo = "Beta";
+    return (function() {
+      var bar = "fish";
+      try {
+        throw "Alpha";
+      } catch (eee) {
+        return function() {
+          if (debug_stop) debugger;
+          return eee + foo;
+        }
+      }
+    })();
+  })
+];
+
+for (var i = 0; i < closure_test_cases.length; i++) {
+  closure_test_cases[i].run_pause_test();
+}
+
+for (var i = 0; i < closure_test_cases.length; i++) {
+  closure_test_cases[i].run_closure_test();
+}
+
+
+// Test local scope.
+
+RunPauseTest(0, 'HelloYou', 'u', 'We', 'HelloWe', (function Factory() {
   return function() {
+    var u = "You";
+    var v = "Hello";
+    debugger;
+    return v + u;
+  }
+})());
+
+RunPauseTest(0, 'Helloworld', 'p', 'GoodBye', 'HelloGoodBye',
+    (function Factory() {
+  function H(p) {
+    var v = "Hello";
+    debugger;
+    return v + p;
+  }
+  return function() {
+    return H("world");
+  }
+})());
+
+RunPauseTest(0, 'mouse', 'v1', 'dog', 'dog', (function Factory() {
+  return function() {
+    var v1 = 'cat';
+    eval("v1 = 'mouse'");
     debugger;
     return v1;
   }
 })());
 
-RunPauseTest(1, 'v2', 11, (function Factory(v2) {
+RunPauseTest(0, 'mouse', 'v1', 'dog', 'dog', (function Factory() {
   return function() {
+    eval("var v1 = 'mouse'");
     debugger;
-    return v2;
-  }
-})('dog'));
-
-RunPauseTest(3, 'foo', 77, (function Factory() {
-  var foo = "capybara";
-  return (function() {
-    var bar = "fish";
-    try {
-      throw {name: "test exception"};
-    } catch (e) {
-      return function() {
-        debugger;
-        bar = "beast";
-        return foo;
-      }
-    }
-  })();
-})());
-
-
-
-// Test changing variable value in closure by handle
-RunClosureTest(0, 'v1', 5, (function Factory() {
-  var v1 = 'cat';
-  return function() {
     return v1;
   }
 })());
 
-RunClosureTest(0, 'v2', 11, (function Factory(v2) {
-  return function() {
-    return v2;
-  }
-})('dog'));
 
-RunClosureTest(2, 'foo', 77, (function Factory() {
-  var foo = "capybara";
-  return (function() {
-    var bar = "fish";
-    try {
-      throw {name: "test exception"};
-    } catch (e) {
-      return function() {
-        bar = "beast";
-        return foo;
+// Check that we correctly update a local variable that
+// is referenced from an inner closure.
+RunPauseTest(0, 'Blue', 'v', 'Green', 'Green', (function Factory() {
+  return function() {
+    function A() {
+      var v = "Blue";
+      function Inner() {
+        return void v;
       }
+      debugger;
+      return v;
     }
-  })();
+    return A();
+  }
+})());
+
+// Check that we correctly update a parameter that is known to be stored
+// both on the stack and in the heap.
+RunPauseTest(0, 5, 'p', 2012, 2012, (function Factory() {
+  return function() {
+    function A(p) {
+      function Inner() {
+        return void p;
+      }
+      debugger;
+      return p;
+    }
+    return A(5);
+  }
 })());
 
 
 // Test value description protocol JSON
+
 assertEquals(true, Debug.TestApi.CommandProcessorResolveValue({value: true}));
 
 assertSame(null, Debug.TestApi.CommandProcessorResolveValue({type: "null"}));
@@ -173,4 +306,3 @@
     {handle: Debug.MakeMirror(Number).handle()}));
 assertSame(RunClosureTest, Debug.TestApi.CommandProcessorResolveValue(
     {handle: Debug.MakeMirror(RunClosureTest).handle()}));
-
diff --git a/test/mjsunit/error-tostring.js b/test/mjsunit/error-tostring.js
index a285641..8a8a969 100644
--- a/test/mjsunit/error-tostring.js
+++ b/test/mjsunit/error-tostring.js
@@ -83,3 +83,11 @@
 assertEquals(["null: e2",[1,2,3,4]], testErrorToString(null, "e2"));
 assertEquals(["e2",[1,2,3,4]], testErrorToString("", "e2"));
 assertEquals(["e1: e2",[1,2,3,4]], testErrorToString("e1", "e2"));
+
+var obj = {
+  get constructor () {
+    assertUnreachable();
+  }
+};
+
+assertThrows(function() { obj.x(); });
diff --git a/test/mjsunit/eval-stack-trace.js b/test/mjsunit/eval-stack-trace.js
index 723d522..d83b84c 100644
--- a/test/mjsunit/eval-stack-trace.js
+++ b/test/mjsunit/eval-stack-trace.js
@@ -26,12 +26,13 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Return the stack frames of an Error object.
+
+Error.prepareStackTrace = function(error, frames) {
+  return frames;
+}
+
 Error.prototype.getFrames = function() {
-  Error.prepareStackTrace = function(error, frames) {
-    return frames;
-  }
   var frames = this.stack;
-  Error.prepareStackTrace = undefined;
   return frames;
 }
 
diff --git a/test/mjsunit/fast-prototype.js b/test/mjsunit/fast-prototype.js
index 7fd73a4..83bcffe 100644
--- a/test/mjsunit/fast-prototype.js
+++ b/test/mjsunit/fast-prototype.js
@@ -27,6 +27,10 @@
 
 // Flags: --allow-natives-syntax
 
+// TODO(mstarzinger): This test does not succeed when GCs happen in
+// between prototype transitions, so we disable GC stress for now.
+// Flags: --noincremental-marking
+
 // Check that objects that are used for prototypes are in the fast mode.
 
 function Super() {
diff --git a/test/mjsunit/fuzz-natives-part1.js b/test/mjsunit/fuzz-natives-part1.js
index 87f7d0d..f459126 100644
--- a/test/mjsunit/fuzz-natives-part1.js
+++ b/test/mjsunit/fuzz-natives-part1.js
@@ -152,6 +152,7 @@
   "LazyRecompile": true,
   "ParallelRecompile": true,
   "NotifyDeoptimized": true,
+  "NotifyICMiss": true,
   "NotifyOSR": true,
   "CreateObjectLiteralBoilerplate": true,
   "CloneLiteralBoilerplate": true,
diff --git a/test/mjsunit/harmony/object-observe.js b/test/mjsunit/harmony/object-observe.js
index 04dfb96..4836eaa 100644
--- a/test/mjsunit/harmony/object-observe.js
+++ b/test/mjsunit/harmony/object-observe.js
@@ -26,6 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Flags: --harmony-observation --harmony-proxies --harmony-collections
+// Flags: --allow-natives-syntax
 
 var allObservers = [];
 function reset() {
@@ -64,8 +65,7 @@
     assertCallbackRecords: function(recs) {
       this.assertRecordCount(recs.length);
       for (var i = 0; i < recs.length; i++) {
-        if ('name' in recs[i])
-          recs[i].name = String(recs[i].name);
+        if ('name' in recs[i]) recs[i].name = String(recs[i].name);
         print(i, stringifyNoThrow(this.records[i]), stringifyNoThrow(recs[i]));
         assertSame(this.records[i].object, recs[i].object);
         assertEquals('string', typeof recs[i].type);
@@ -104,17 +104,20 @@
   enumerable: true
 })
 
+
 // Object.observe
 assertThrows(function() { Object.observe("non-object", observer.callback); }, TypeError);
 assertThrows(function() { Object.observe(obj, nonFunction); }, TypeError);
 assertThrows(function() { Object.observe(obj, frozenFunction); }, TypeError);
 assertEquals(obj, Object.observe(obj, observer.callback));
 
+
 // Object.unobserve
 assertThrows(function() { Object.unobserve(4, observer.callback); }, TypeError);
 assertThrows(function() { Object.unobserve(obj, nonFunction); }, TypeError);
 assertEquals(obj, Object.unobserve(obj, observer.callback));
 
+
 // Object.getNotifier
 var notifier = Object.getNotifier(obj);
 assertSame(notifier, Object.getNotifier(obj));
@@ -138,11 +141,13 @@
 notifier.notify(changeRecordWithAccessor);
 assertFalse(recordCreated);  // not observed yet
 
+
 // Object.deliverChangeRecords
 assertThrows(function() { Object.deliverChangeRecords(nonFunction); }, TypeError);
 
 Object.observe(obj, observer.callback);
 
+
 // notify uses [[CreateOwnProperty]] to create the changeRecord.
 reset();
 var protoExpandoAccessed = false;
@@ -157,6 +162,7 @@
 delete Object.prototype.protoExpando;
 Object.deliverChangeRecords(observer.callback);
 
+
 // Multiple records are delivered.
 reset();
 notifier.notify({
@@ -177,11 +183,13 @@
   { object: obj, name: 'bar', type: 'deleted', expando2: 'str' }
 ]);
 
+
 // No delivery takes place if no records are pending
 reset();
 Object.deliverChangeRecords(observer.callback);
 observer.assertNotCalled();
 
+
 // Multiple observation has no effect.
 reset();
 Object.observe(obj, observer.callback);
@@ -192,6 +200,7 @@
 Object.deliverChangeRecords(observer.callback);
 observer.assertCalled();
 
+
 // Observation can be stopped.
 reset();
 Object.unobserve(obj, observer.callback);
@@ -201,6 +210,7 @@
 Object.deliverChangeRecords(observer.callback);
 observer.assertNotCalled();
 
+
 // Multiple unobservation has no effect
 reset();
 Object.unobserve(obj, observer.callback);
@@ -211,6 +221,7 @@
 Object.deliverChangeRecords(observer.callback);
 observer.assertNotCalled();
 
+
 // Re-observation works and only includes changeRecords after the call.
 reset();
 Object.getNotifier(obj).notify({
@@ -224,6 +235,7 @@
 Object.deliverChangeRecords(observer.callback);
 observer.assertRecordCount(1);
 
+
 // Observing a continuous stream of changes, while intermittently unobserving.
 reset();
 Object.observe(obj, observer.callback);
@@ -264,6 +276,7 @@
   { object: obj, type: 'foo', val: 5 }
 ]);
 
+
 // Observing multiple objects; records appear in order.
 reset();
 var obj2 = {};
@@ -288,6 +301,37 @@
   { object: obj3, type: 'foo3' }
 ]);
 
+
+// Recursive observation.
+var obj = {a: 1};
+var callbackCount = 0;
+function recursiveObserver(r) {
+  assertEquals(1, r.length);
+  ++callbackCount;
+  if (r[0].oldValue < 100) ++obj[r[0].name];
+}
+Object.observe(obj, recursiveObserver);
+++obj.a;
+Object.deliverChangeRecords(recursiveObserver);
+assertEquals(100, callbackCount);
+
+var obj1 = {a: 1};
+var obj2 = {a: 1};
+var recordCount = 0;
+function recursiveObserver2(r) {
+  recordCount += r.length;
+  if (r[0].oldValue < 100) {
+    ++obj1.a;
+    ++obj2.a;
+  }
+}
+Object.observe(obj1, recursiveObserver2);
+Object.observe(obj2, recursiveObserver2);
+++obj1.a;
+Object.deliverChangeRecords(recursiveObserver2);
+assertEquals(199, recordCount);
+
+
 // Observing named properties.
 reset();
 var obj = {a: 1}
@@ -314,6 +358,9 @@
 Object.defineProperty(obj, "a", {get: function() {}, configurable: true});
 Object.defineProperty(obj, "a", {value: 9, writable: true});
 obj.a = 10;
+++obj.a;
+obj.a++;
+obj.a *= 3;
 delete obj.a;
 Object.defineProperty(obj, "a", {value: 11, configurable: true});
 Object.deliverChangeRecords(observer.callback);
@@ -335,10 +382,14 @@
   { object: obj, name: "a", type: "new" },
   { object: obj, name: "a", type: "reconfigured" },
   { object: obj, name: "a", type: "updated", oldValue: 9 },
-  { object: obj, name: "a", type: "deleted", oldValue: 10 },
+  { object: obj, name: "a", type: "updated", oldValue: 10 },
+  { object: obj, name: "a", type: "updated", oldValue: 11 },
+  { object: obj, name: "a", type: "updated", oldValue: 12 },
+  { object: obj, name: "a", type: "deleted", oldValue: 36 },
   { object: obj, name: "a", type: "new" },
 ]);
 
+
 // Observing indexed properties.
 reset();
 var obj = {'1': 1}
@@ -365,6 +416,9 @@
 Object.defineProperty(obj, "1", {get: function() {}, configurable: true});
 Object.defineProperty(obj, "1", {value: 9, writable: true});
 obj[1] = 10;
+++obj[1];
+obj[1]++;
+obj[1] *= 3;
 delete obj[1];
 Object.defineProperty(obj, "1", {value: 11, configurable: true});
 Object.deliverChangeRecords(observer.callback);
@@ -386,7 +440,10 @@
   { object: obj, name: "1", type: "new" },
   { object: obj, name: "1", type: "reconfigured" },
   { object: obj, name: "1", type: "updated", oldValue: 9 },
-  { object: obj, name: "1", type: "deleted", oldValue: 10 },
+  { object: obj, name: "1", type: "updated", oldValue: 10 },
+  { object: obj, name: "1", type: "updated", oldValue: 11 },
+  { object: obj, name: "1", type: "updated", oldValue: 12 },
+  { object: obj, name: "1", type: "deleted", oldValue: 36 },
   { object: obj, name: "1", type: "new" },
 ]);
 
@@ -423,6 +480,9 @@
   Object.defineProperty(obj, prop, {get: function() {}, configurable: true});
   Object.defineProperty(obj, prop, {value: 9, writable: true});
   obj[prop] = 10;
+  ++obj[prop];
+  obj[prop]++;
+  obj[prop] *= 3;
   delete obj[prop];
   Object.defineProperty(obj, prop, {value: 11, configurable: true});
   Object.deliverChangeRecords(observer.callback);
@@ -448,7 +508,10 @@
     { object: obj, name: prop, type: "new" },
     { object: obj, name: prop, type: "reconfigured" },
     { object: obj, name: prop, type: "updated", oldValue: 9 },
-    { object: obj, name: prop, type: "deleted", oldValue: 10 },
+    { object: obj, name: prop, type: "updated", oldValue: 10 },
+    { object: obj, name: prop, type: "updated", oldValue: 11 },
+    { object: obj, name: prop, type: "updated", oldValue: 12 },
+    { object: obj, name: prop, type: "deleted", oldValue: 36 },
     { object: obj, name: prop, type: "new" },
   ]);
   Object.unobserve(obj, observer.callback);
@@ -465,8 +528,7 @@
   Object.defineProperty(obj, prop, {value: 6});
   Object.defineProperty(obj, prop, {value: 6});  // ignored
   Object.defineProperty(obj, prop, {value: 7});
-  Object.defineProperty(obj, prop,
-                        {enumerable: desc.enumerable});  // ignored
+  Object.defineProperty(obj, prop, {enumerable: desc.enumerable});  // ignored
   Object.defineProperty(obj, prop, {writable: false});
   obj[prop] = 7;  // ignored
   Object.deliverChangeRecords(observer.callback);
@@ -535,16 +597,13 @@
   createProxy(Proxy.create, null),
   createProxy(Proxy.createFunction, function(){}),
 ];
-var properties = ["a", "1", 1, "length", "prototype"];
+var properties = ["a", "1", 1, "length", "prototype", "name", "caller"];
 
 // Cases that yield non-standard results.
-// TODO(observe): ...or don't work yet.
 function blacklisted(obj, prop) {
   return (obj instanceof Int32Array && prop == 1) ||
-    (obj instanceof Int32Array && prop === "length") ||
-    (obj instanceof ArrayBuffer && prop == 1) ||
-    // TODO(observe): oldValue when reconfiguring array length
-    (obj instanceof Array && prop === "length")
+         (obj instanceof Int32Array && prop === "length") ||
+         (obj instanceof ArrayBuffer && prop == 1)
 }
 
 for (var i in objects) for (var j in properties) {
@@ -580,9 +639,14 @@
 arr.length = 2;
 arr.length = 0;
 arr.length = 10;
+Object.defineProperty(arr, 'length', {writable: false});
 arr2.length = 0;
 arr2.length = 1; // no change expected
+Object.defineProperty(arr2, 'length', {value: 1, writable: false});
 arr3.length = 0;
+++arr3.length;
+arr3.length++;
+arr3.length /= 2;
 Object.defineProperty(arr3, 'length', {value: 5});
 Object.defineProperty(arr3, 'length', {value: 10, writable: false});
 Object.deliverChangeRecords(observer.callback);
@@ -593,17 +657,21 @@
   { object: arr, name: '1', type: 'deleted', oldValue: 'b' },
   { object: arr, name: 'length', type: 'updated', oldValue: 2 },
   { object: arr, name: 'length', type: 'updated', oldValue: 1 },
+  { object: arr, name: 'length', type: 'reconfigured', oldValue: 10 },
   { object: arr2, name: '1', type: 'deleted', oldValue: 'beta' },
   { object: arr2, name: 'length', type: 'updated', oldValue: 2 },
+  { object: arr2, name: 'length', type: 'reconfigured', oldValue: 1 },
   { object: arr3, name: '2', type: 'deleted', oldValue: 'goodbye' },
   { object: arr3, name: '0', type: 'deleted', oldValue: 'hello' },
   { object: arr3, name: 'length', type: 'updated', oldValue: 6 },
   { object: arr3, name: 'length', type: 'updated', oldValue: 0 },
-  { object: arr3, name: 'length', type: 'updated', oldValue: 5 },
-  // TODO(adamk): This record should be merged with the above
-  { object: arr3, name: 'length', type: 'reconfigured' },
+  { object: arr3, name: 'length', type: 'updated', oldValue: 1 },
+  { object: arr3, name: 'length', type: 'updated', oldValue: 2 },
+  { object: arr3, name: 'length', type: 'updated', oldValue: 1 },
+  { object: arr3, name: 'length', type: 'reconfigured', oldValue: 5 },
 ]);
 
+
 // Assignments in loops (checking different IC states).
 reset();
 var obj = {};
@@ -635,6 +703,7 @@
   { object: obj, name: "4", type: "new" },
 ]);
 
+
 // Adding elements past the end of an array should notify on length
 reset();
 var arr = [1, 2, 3];
@@ -657,6 +726,7 @@
   { object: arr, name: '50', type: 'new' },
 ]);
 
+
 // Tests for array methods, first on arrays and then on plain objects
 //
 // === ARRAYS ===
@@ -730,6 +800,7 @@
   { object: array, name: '2', type: 'updated', oldValue: 3 },
 ]);
 
+
 //
 // === PLAIN OBJECTS ===
 //
@@ -836,6 +907,7 @@
   { object: obj, name: '__proto__', type: 'prototype', oldValue: null },
 ]);
 
+
 // Function.prototype
 reset();
 var fun = function(){};
@@ -871,3 +943,112 @@
 obj.prototype = 7;
 Object.deliverChangeRecords(observer.callback);
 observer.assertNotCalled();
+
+
+// Check that changes in observation status are detected in all IC states and
+// in optimized code, especially in cases that usually use fast elements.
+var mutation = [
+  "a[i] = v",
+  "a[i] ? ++a[i] : a[i] = v",
+  "a[i] ? a[i]++ : a[i] = v",
+  "a[i] ? a[i] += 1 : a[i] = v",
+  "a[i] ? a[i] -= -1 : a[i] = v",
+];
+
+var props = [1, "1", "a"];
+
+function TestFastElements(prop, mutation, prepopulate, polymorphic, optimize) {
+  var setElement = eval(
+    "(function setElement(a, i, v) { " + mutation + "; " +
+    "/* " + [].join.call(arguments, " ") + " */" +
+    "})"
+  );
+  print("TestFastElements:", setElement);
+
+  var arr = prepopulate ? [1, 2, 3, 4, 5] : [0];
+  if (prepopulate) arr[prop] = 2;  // for non-element case
+  setElement(arr, prop, 3);
+  setElement(arr, prop, 4);
+  if (polymorphic) setElement(["M", "i", "l", "n", "e", "r"], 0, "m");
+  if (optimize) %OptimizeFunctionOnNextCall(setElement);
+  setElement(arr, prop, 5);
+
+  reset();
+  Object.observe(arr, observer.callback);
+  setElement(arr, prop, 989898);
+  Object.deliverChangeRecords(observer.callback);
+  observer.assertCallbackRecords([
+    { object: arr, name: "" + prop, type: 'updated', oldValue: 5 }
+  ]);
+}
+
+for (var b1 = 0; b1 < 2; ++b1)
+  for (var b2 = 0; b2 < 2; ++b2)
+    for (var b3 = 0; b3 < 2; ++b3)
+      for (var i in props)
+        for (var j in mutation)
+          TestFastElements(props[i], mutation[j], b1 != 0, b2 != 0, b3 != 0);
+
+
+var mutation = [
+  "a.length = v",
+  "a.length += newSize - oldSize",
+  "a.length -= oldSize - newSize",
+];
+
+var mutationByIncr = [
+  "++a.length",
+  "a.length++",
+];
+
+function TestFastElementsLength(
+  mutation, polymorphic, optimize, oldSize, newSize) {
+  var setLength = eval(
+    "(function setLength(a, v) { " + mutation + "; " +
+    "/* " + [].join.call(arguments, " ") + " */"
+    + "})"
+  );
+  print("TestFastElementsLength:", setLength);
+
+  function array(n) {
+    var arr = new Array(n);
+    for (var i = 0; i < n; ++i) arr[i] = i;
+    return arr;
+  }
+
+  setLength(array(oldSize), newSize);
+  setLength(array(oldSize), newSize);
+  if (polymorphic) setLength(array(oldSize).map(isNaN), newSize);
+  if (optimize) %OptimizeFunctionOnNextCall(setLength);
+  setLength(array(oldSize), newSize);
+
+  reset();
+  var arr = array(oldSize);
+  Object.observe(arr, observer.callback);
+  setLength(arr, newSize);
+  Object.deliverChangeRecords(observer.callback);
+  if (oldSize === newSize) {
+    observer.assertNotCalled();
+  } else {
+    var count = oldSize > newSize ? oldSize - newSize : 0;
+    observer.assertRecordCount(count + 1);
+    var lengthRecord = observer.records[count];
+    assertSame(arr, lengthRecord.object);
+    assertEquals('length', lengthRecord.name);
+    assertEquals('updated', lengthRecord.type);
+    assertSame(oldSize, lengthRecord.oldValue);
+  }
+}
+
+for (var b1 = 0; b1 < 2; ++b1)
+  for (var b2 = 0; b2 < 2; ++b2)
+    for (var n1 = 0; n1 < 3; ++n1)
+      for (var n2 = 0; n2 < 3; ++n2)
+        for (var i in mutation)
+          TestFastElementsLength(mutation[i], b1 != 0, b2 != 0, 20*n1, 20*n2);
+
+for (var b1 = 0; b1 < 2; ++b1)
+  for (var b2 = 0; b2 < 2; ++b2)
+    for (var n = 0; n < 3; ++n)
+      for (var i in mutationByIncr)
+        TestFastElementsLength(mutationByIncr[i], b1 != 0, b2 != 0, 7*n, 7*n+1);
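
Taken together, the additions above exercise the Object.observe pipeline:
each mutation of an observed object enqueues one change record per observer,
and Object.deliverChangeRecords flushes an observer's queue synchronously. A
minimal sketch of that flow, assuming the same --harmony-observation flag as
the test:

var subject = {};
var seen = [];
function logger(records) {
  // Records carry object, name, type, and oldValue (for updates/deletes).
  records.forEach(function(r) { seen.push(r.type + ':' + r.name); });
}
Object.observe(subject, logger);
subject.a = 1;  // enqueues a 'new' record
subject.a = 2;  // enqueues an 'updated' record with oldValue 1
Object.deliverChangeRecords(logger);
// seen is now ['new:a', 'updated:a'].
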
diff --git a/test/mjsunit/math-floor-of-div-nosudiv.js b/test/mjsunit/math-floor-of-div-nosudiv.js
new file mode 100644
index 0000000..7155e85
--- /dev/null
+++ b/test/mjsunit/math-floor-of-div-nosudiv.js
@@ -0,0 +1,230 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --nouse_inlining --noenable_sudiv
+
+// Use this function as a reference. Make sure it is not inlined.
+function div(a, b) {
+  return a / b;
+}
+
+var limit = 0x1000000;
+var exhaustive_limit = 100;
+var step = 10;
+var values = [0x10000001,
+              0x12345678,
+              -0x789abcdf,  // 0x87654321
+              0x01234567,
+              0x76543210,
+              -0x80000000,  // 0x80000000
+              0x7fffffff,
+              -0x0fffffff,  // 0xf0000001
+              0x00000010,
+              -0x01000000   // 0xff000000
+              ];
+
+function test_div() {
+  var c = 0;
+  for (var k = 0; k <= limit; k++) {
+    if (k > exhaustive_limit) { c += step; k += c; }
+    assertEquals(Math.floor(div(k,   1)), Math.floor(k /   1));
+    assertEquals(Math.floor(div(k,   -1)), Math.floor(k /   -1));
+    assertEquals(Math.floor(div(k,   2)), Math.floor(k /   2));
+    assertEquals(Math.floor(div(k,   -2)), Math.floor(k /   -2));
+    assertEquals(Math.floor(div(k,   3)), Math.floor(k /   3));
+    assertEquals(Math.floor(div(k,   -3)), Math.floor(k /   -3));
+    assertEquals(Math.floor(div(k,   4)), Math.floor(k /   4));
+    assertEquals(Math.floor(div(k,   -4)), Math.floor(k /   -4));
+    assertEquals(Math.floor(div(k,   5)), Math.floor(k /   5));
+    assertEquals(Math.floor(div(k,   -5)), Math.floor(k /   -5));
+    assertEquals(Math.floor(div(k,   6)), Math.floor(k /   6));
+    assertEquals(Math.floor(div(k,   -6)), Math.floor(k /   -6));
+    assertEquals(Math.floor(div(k,   7)), Math.floor(k /   7));
+    assertEquals(Math.floor(div(k,   -7)), Math.floor(k /   -7));
+    assertEquals(Math.floor(div(k,   8)), Math.floor(k /   8));
+    assertEquals(Math.floor(div(k,   -8)), Math.floor(k /   -8));
+    assertEquals(Math.floor(div(k,   9)), Math.floor(k /   9));
+    assertEquals(Math.floor(div(k,   -9)), Math.floor(k /   -9));
+    assertEquals(Math.floor(div(k,  10)), Math.floor(k /  10));
+    assertEquals(Math.floor(div(k,  -10)), Math.floor(k /  -10));
+    assertEquals(Math.floor(div(k,  11)), Math.floor(k /  11));
+    assertEquals(Math.floor(div(k,  -11)), Math.floor(k /  -11));
+    assertEquals(Math.floor(div(k,  12)), Math.floor(k /  12));
+    assertEquals(Math.floor(div(k,  -12)), Math.floor(k /  -12));
+    assertEquals(Math.floor(div(k,  13)), Math.floor(k /  13));
+    assertEquals(Math.floor(div(k,  -13)), Math.floor(k /  -13));
+    assertEquals(Math.floor(div(k,  14)), Math.floor(k /  14));
+    assertEquals(Math.floor(div(k,  -14)), Math.floor(k /  -14));
+    assertEquals(Math.floor(div(k,  15)), Math.floor(k /  15));
+    assertEquals(Math.floor(div(k,  -15)), Math.floor(k /  -15));
+    assertEquals(Math.floor(div(k,  16)), Math.floor(k /  16));
+    assertEquals(Math.floor(div(k,  -16)), Math.floor(k /  -16));
+    assertEquals(Math.floor(div(k,  17)), Math.floor(k /  17));
+    assertEquals(Math.floor(div(k,  -17)), Math.floor(k /  -17));
+    assertEquals(Math.floor(div(k,  18)), Math.floor(k /  18));
+    assertEquals(Math.floor(div(k,  -18)), Math.floor(k /  -18));
+    assertEquals(Math.floor(div(k,  19)), Math.floor(k /  19));
+    assertEquals(Math.floor(div(k,  -19)), Math.floor(k /  -19));
+    assertEquals(Math.floor(div(k,  20)), Math.floor(k /  20));
+    assertEquals(Math.floor(div(k,  -20)), Math.floor(k /  -20));
+    assertEquals(Math.floor(div(k,  21)), Math.floor(k /  21));
+    assertEquals(Math.floor(div(k,  -21)), Math.floor(k /  -21));
+    assertEquals(Math.floor(div(k,  22)), Math.floor(k /  22));
+    assertEquals(Math.floor(div(k,  -22)), Math.floor(k /  -22));
+    assertEquals(Math.floor(div(k,  23)), Math.floor(k /  23));
+    assertEquals(Math.floor(div(k,  -23)), Math.floor(k /  -23));
+    assertEquals(Math.floor(div(k,  24)), Math.floor(k /  24));
+    assertEquals(Math.floor(div(k,  -24)), Math.floor(k /  -24));
+    assertEquals(Math.floor(div(k,  25)), Math.floor(k /  25));
+    assertEquals(Math.floor(div(k,  -25)), Math.floor(k /  -25));
+    assertEquals(Math.floor(div(k, 125)), Math.floor(k / 125));
+    assertEquals(Math.floor(div(k, -125)), Math.floor(k / -125));
+    assertEquals(Math.floor(div(k, 625)), Math.floor(k / 625));
+    assertEquals(Math.floor(div(k, -625)), Math.floor(k / -625));
+  }
+  c = 0;
+  for (var k = 0; k <= limit; k++) {
+    if (k > exhaustive_limit) { c += step; k += c; }
+    assertEquals(Math.floor(div(-k,   1)), Math.floor(-k /   1));
+    assertEquals(Math.floor(div(-k,   -1)), Math.floor(-k /   -1));
+    assertEquals(Math.floor(div(-k,   2)), Math.floor(-k /   2));
+    assertEquals(Math.floor(div(-k,   -2)), Math.floor(-k /   -2));
+    assertEquals(Math.floor(div(-k,   3)), Math.floor(-k /   3));
+    assertEquals(Math.floor(div(-k,   -3)), Math.floor(-k /   -3));
+    assertEquals(Math.floor(div(-k,   4)), Math.floor(-k /   4));
+    assertEquals(Math.floor(div(-k,   -4)), Math.floor(-k /   -4));
+    assertEquals(Math.floor(div(-k,   5)), Math.floor(-k /   5));
+    assertEquals(Math.floor(div(-k,   -5)), Math.floor(-k /   -5));
+    assertEquals(Math.floor(div(-k,   6)), Math.floor(-k /   6));
+    assertEquals(Math.floor(div(-k,   -6)), Math.floor(-k /   -6));
+    assertEquals(Math.floor(div(-k,   7)), Math.floor(-k /   7));
+    assertEquals(Math.floor(div(-k,   -7)), Math.floor(-k /   -7));
+    assertEquals(Math.floor(div(-k,   8)), Math.floor(-k /   8));
+    assertEquals(Math.floor(div(-k,   -8)), Math.floor(-k /   -8));
+    assertEquals(Math.floor(div(-k,   9)), Math.floor(-k /   9));
+    assertEquals(Math.floor(div(-k,   -9)), Math.floor(-k /   -9));
+    assertEquals(Math.floor(div(-k,  10)), Math.floor(-k /  10));
+    assertEquals(Math.floor(div(-k,  -10)), Math.floor(-k /  -10));
+    assertEquals(Math.floor(div(-k,  11)), Math.floor(-k /  11));
+    assertEquals(Math.floor(div(-k,  -11)), Math.floor(-k /  -11));
+    assertEquals(Math.floor(div(-k,  12)), Math.floor(-k /  12));
+    assertEquals(Math.floor(div(-k,  -12)), Math.floor(-k /  -12));
+    assertEquals(Math.floor(div(-k,  13)), Math.floor(-k /  13));
+    assertEquals(Math.floor(div(-k,  -13)), Math.floor(-k /  -13));
+    assertEquals(Math.floor(div(-k,  14)), Math.floor(-k /  14));
+    assertEquals(Math.floor(div(-k,  -14)), Math.floor(-k /  -14));
+    assertEquals(Math.floor(div(-k,  15)), Math.floor(-k /  15));
+    assertEquals(Math.floor(div(-k,  -15)), Math.floor(-k /  -15));
+    assertEquals(Math.floor(div(-k,  16)), Math.floor(-k /  16));
+    assertEquals(Math.floor(div(-k,  -16)), Math.floor(-k /  -16));
+    assertEquals(Math.floor(div(-k,  17)), Math.floor(-k /  17));
+    assertEquals(Math.floor(div(-k,  -17)), Math.floor(-k /  -17));
+    assertEquals(Math.floor(div(-k,  18)), Math.floor(-k /  18));
+    assertEquals(Math.floor(div(-k,  -18)), Math.floor(-k /  -18));
+    assertEquals(Math.floor(div(-k,  19)), Math.floor(-k /  19));
+    assertEquals(Math.floor(div(-k,  -19)), Math.floor(-k /  -19));
+    assertEquals(Math.floor(div(-k,  20)), Math.floor(-k /  20));
+    assertEquals(Math.floor(div(-k,  -20)), Math.floor(-k /  -20));
+    assertEquals(Math.floor(div(-k,  21)), Math.floor(-k /  21));
+    assertEquals(Math.floor(div(-k,  -21)), Math.floor(-k /  -21));
+    assertEquals(Math.floor(div(-k,  22)), Math.floor(-k /  22));
+    assertEquals(Math.floor(div(-k,  -22)), Math.floor(-k /  -22));
+    assertEquals(Math.floor(div(-k,  23)), Math.floor(-k /  23));
+    assertEquals(Math.floor(div(-k,  -23)), Math.floor(-k /  -23));
+    assertEquals(Math.floor(div(-k,  24)), Math.floor(-k /  24));
+    assertEquals(Math.floor(div(-k,  -24)), Math.floor(-k /  -24));
+    assertEquals(Math.floor(div(-k,  25)), Math.floor(-k /  25));
+    assertEquals(Math.floor(div(-k,  -25)), Math.floor(-k /  -25));
+    assertEquals(Math.floor(div(-k, 125)), Math.floor(-k / 125));
+    assertEquals(Math.floor(div(-k, -125)), Math.floor(-k / -125));
+    assertEquals(Math.floor(div(-k, 625)), Math.floor(-k / 625));
+    assertEquals(Math.floor(div(-k, -625)), Math.floor(-k / -625));
+  }
+  // Test for edge cases.
+  // Use (values[key] | 0) to force the integer type.
+  for (var i = 0; i < values.length; i++) {
+    for (var j = 0; j < values.length; j++) {
+      assertEquals(Math.floor(div((values[i] | 0), (values[j] | 0))),
+                   Math.floor((values[i] | 0) / (values[j] | 0)));
+      assertEquals(Math.floor(div(-(values[i] | 0), (values[j] | 0))),
+                   Math.floor(-(values[i] | 0) / (values[j] | 0)));
+      assertEquals(Math.floor(div((values[i] | 0), -(values[j] | 0))),
+                   Math.floor((values[i] | 0) / -(values[j] | 0)));
+      assertEquals(Math.floor(div(-(values[i] | 0), -(values[j] | 0))),
+                   Math.floor(-(values[i] | 0) / -(values[j] | 0)));
+    }
+  }
+}
+
+test_div();
+%OptimizeFunctionOnNextCall(test_div);
+test_div();
+
+// Test for negative zero, overflow and division by 0.
+// Separate the tests to prevent deoptimizations from making the other optimized
+// test unreachable.
+
+function IsNegativeZero(x) {
+  assertTrue(x == 0);  // Is 0 or -0.
+  var y = 1 / x;
+  assertFalse(isFinite(y));
+  return y < 0;
+}
+
+function test_div_deopt_minus_zero() {
+  var zero_in_array = [0];
+  for (var i = 0; i < 2; ++i) {
+    assertTrue(IsNegativeZero(Math.floor((zero_in_array[0] | 0) / -1)));
+  }
+}
+
+function test_div_deopt_overflow() {
+  // We box the value in an array to avoid constant propagation.
+  var min_int_in_array = [-2147483648];
+  for (var i = 0; i < 2; ++i) {
+    // We use '| 0' to force the representation to int32.
+    assertEquals(-min_int_in_array[0],
+                 Math.floor((min_int_in_array[0] | 0) / -1));
+  }
+}
+
+function test_div_deopt_div_by_zero() {
+  for (var i = 0; i < 2; ++i) {
+    assertEquals(div(i, 0),
+                 Math.floor(i / 0));
+  }
+}
+
+test_div_deopt_minus_zero();
+test_div_deopt_overflow();
+test_div_deopt_div_by_zero();
+%OptimizeFunctionOnNextCall(test_div_deopt_minus_zero);
+%OptimizeFunctionOnNextCall(test_div_deopt_overflow);
+%OptimizeFunctionOnNextCall(test_div_deopt_div_by_zero);
+test_div_deopt_minus_zero();
+test_div_deopt_overflow();
+test_div_deopt_div_by_zero();
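
This file duplicates math-floor-of-div with --noenable_sudiv so that the ARM
code path which avoids the hardware sudiv instruction gets the same coverage.
The deopt tests at the end isolate the three inputs for which
Math.floor(a / b), compiled as an int32 division, has no int32 result; a
sketch of the hazards, not part of the patch:

Math.floor(0 / -1);            // -0, which is not representable as int32
Math.floor(-2147483648 / -1);  // 2147483648, which overflows int32
Math.floor(1 / 0);             // Infinity, no int32 result either
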
diff --git a/test/mjsunit/math-floor-of-div.js b/test/mjsunit/math-floor-of-div.js
index e917182..35bc3e4 100644
--- a/test/mjsunit/math-floor-of-div.js
+++ b/test/mjsunit/math-floor-of-div.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --nouse_inlining
+// Flags: --allow-natives-syntax --nouse_inlining --enable_sudiv
 
 // Use this function as a reference. Make sure it is not inlined.
 function div(a, b) {
@@ -184,7 +184,7 @@
 %OptimizeFunctionOnNextCall(test_div);
 test_div();
 
-// Test for negative zero and overflow.
+// Test for negative zero, overflow and division by 0.
 // Separate the tests to prevent deoptimizations from making the other optimized
 // test unreachable.
 
@@ -197,20 +197,34 @@
 
 function test_div_deopt_minus_zero() {
   var zero_in_array = [0];
-  assertTrue(IsNegativeZero(Math.floor((zero_in_array[0] | 0) / -1)));
+  for (var i = 0; i < 2; ++i) {
+    assertTrue(IsNegativeZero(Math.floor((zero_in_array[0] | 0) / -1)));
+  }
 }
 
 function test_div_deopt_overflow() {
   // We box the value in an array to avoid constant propagation.
   var min_int_in_array = [-2147483648];
-  // We use '| 0' to force the representation to int32.
-  assertEquals(-min_int_in_array[0],
-               Math.floor((min_int_in_array[0] | 0) / -1));
+  for (var i = 0; i < 2; ++i) {
+    // We use '| 0' to force the representation to int32.
+    assertEquals(-min_int_in_array[0],
+                 Math.floor((min_int_in_array[0] | 0) / -1));
+  }
+}
+
+function test_div_deopt_div_by_zero() {
+  for (var i = 0; i < 2; ++i) {
+    assertEquals(div(i, 0),
+                 Math.floor(i / 0));
+  }
 }
 
 test_div_deopt_minus_zero();
 test_div_deopt_overflow();
+test_div_deopt_div_by_zero();
 %OptimizeFunctionOnNextCall(test_div_deopt_minus_zero);
 %OptimizeFunctionOnNextCall(test_div_deopt_overflow);
+%OptimizeFunctionOnNextCall(test_div_deopt_div_by_zero);
 test_div_deopt_minus_zero();
 test_div_deopt_overflow();
+test_div_deopt_div_by_zero();
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 0bf378b..da961ca 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -77,6 +77,10 @@
 tools/tickprocessor: PASS, SKIP if ($arch == android_arm || $arch == android_ia32)
 
 ##############################################################################
+# This test is the same as math-floor-of-div for non-ARM architectures.
+math-floor-of-div-nosudiv: PASS, SKIP if ($arch != arm && $arch != android_arm)
+
+##############################################################################
 [ $arch == arm || $arch == android_arm ]
 
 # Slow tests which times out in debug mode.
diff --git a/test/mjsunit/regress/regress-165637.js b/test/mjsunit/regress/regress-165637.js
new file mode 100644
index 0000000..84c9041
--- /dev/null
+++ b/test/mjsunit/regress/regress-165637.js
@@ -0,0 +1,57 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Should not take a very long time (n^2 algorithms are bad)
+
+function do_slices() {
+  var data = new Array(1024 * 12); // 12kB
+
+  for (var i = 0; i < data.length; i++) {
+    data[i] = 255;
+  }
+
+  var start = Date.now();
+
+  for (i = 0; i < 20000; i++) {
+    data.slice(4, 1);
+  }
+
+  return Date.now() - start;
+}
+
+// Should never take more than 3 seconds (if the bug is fixed, the test takes
+// considerably less time than 3 seconds).
+assertTrue(do_slices() < (3 * 1000));
+
+// Make sure that packed and unpacked array slices are still properly handled
+var holey_array = [1, 2, 3, 4, 5,,,,,,];
+assertFalse(%HasFastHoleyElements(holey_array.slice(6, 1)));
+assertEquals(undefined, holey_array.slice(6, 7)[0])
+assertFalse(%HasFastHoleyElements(holey_array.slice(2, 1)));
+assertEquals(3, holey_array.slice(2, 3)[0])
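
The timing assertion targets the quadratic behavior of Chromium issue 165637:
Array.prototype.slice used to do per-element work even when the requested
range is empty (end before start), so repeated empty slices became O(n^2).
The expected behavior, as a sketch rather than part of the patch:

var data = new Array(1024 * 12);
// end (1) is clamped to start (4), so the result is empty and the call
// should not touch the backing store at all.
assertEquals(0, data.slice(4, 1).length);
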
diff --git a/test/mjsunit/regress/regress-166379.js b/test/mjsunit/regress/regress-166379.js
new file mode 100644
index 0000000..b19afbd
--- /dev/null
+++ b/test/mjsunit/regress/regress-166379.js
@@ -0,0 +1,39 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function mod(a, b) { return a % b; }
+
+// Feed integer type info and optimize.
+assertEquals(0, mod(4, 2));
+assertEquals(1, mod(3, 2));
+%OptimizeFunctionOnNextCall(mod);
+
+// Surprise mod with overflow.
+assertEquals(-Infinity, 1/mod(-2147483648, -1));
+
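
The 1/mod(...) assertion distinguishes -0 from +0: the remainder takes the
sign of the dividend, so kMinInt % -1 must be -0, and an optimized int32
modulus that truncates to plain 0 would wrongly yield +0. For reference, a
sketch that is not part of the patch:

// -2147483648 % -1 is -0 in JavaScript; 1 / -0 is -Infinity,
// whereas 1 / +0 would be +Infinity.
assertEquals(-Infinity, 1 / (-2147483648 % -1));
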
diff --git a/test/mjsunit/regress/regress-166553.js b/test/mjsunit/regress/regress-166553.js
new file mode 100644
index 0000000..acaf34f
--- /dev/null
+++ b/test/mjsunit/regress/regress-166553.js
@@ -0,0 +1,33 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose_gc
+
+JSON.stringify(String.fromCharCode(1, -11).toString())
+gc();
+var s = String.fromCharCode(1, -11)
+assertEquals(65525, s.charCodeAt(1))
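
The magic number comes from ToUint16: String.fromCharCode coerces each
argument with ToUint16, so -11 wraps to 65536 - 11 = 65525, and the test
checks that this survives the GC-induced change of string representation. A
minimal illustration, not part of the patch:

// ToUint16(-11) == 65525, i.e. -11 mod 2^16.
assertEquals(65525, String.fromCharCode(-11).charCodeAt(0));
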
diff --git a/test/mjsunit/regress/regress-2243.js b/test/mjsunit/regress/regress-2243.js
new file mode 100644
index 0000000..31c2e55
--- /dev/null
+++ b/test/mjsunit/regress/regress-2243.js
@@ -0,0 +1,31 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-scoping
+
+assertThrows("'use strict'; (function f() { f = 123; })", SyntaxError);
+assertThrows("(function f() { 'use strict'; f = 123; })", SyntaxError);
diff --git a/test/mjsunit/regress/regress-observe-empty-double-array.js b/test/mjsunit/regress/regress-observe-empty-double-array.js
index aea9c73..4b65169 100644
--- a/test/mjsunit/regress/regress-observe-empty-double-array.js
+++ b/test/mjsunit/regress/regress-observe-empty-double-array.js
@@ -32,6 +32,7 @@
 arr = [1.1];
 Object.observe(arr, function(){});
 arr.length = 0;
-assertTrue(%HasFastDoubleElements(arr));
+// TODO(observe): we currently disallow fast elements for observed objects.
+// assertTrue(%HasFastDoubleElements(arr));
 // Should not crash
 arr.push(1.1);
diff --git a/test/mjsunit/stack-traces-gc.js b/test/mjsunit/stack-traces-gc.js
new file mode 100644
index 0000000..dd878f2
--- /dev/null
+++ b/test/mjsunit/stack-traces-gc.js
@@ -0,0 +1,119 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc --allow-natives-syntax
+
+var fired = [];
+for (var i = 0; i < 100; i++) fired[i] = false;
+
+function getter_function(i) {
+  return %MarkOneShotGetter( function() { fired[i] = true; } );
+}
+
+// Error objects that die young.
+for (var i = 0; i < 100; i++) {
+  var error = new Error();
+  // Replace the getter to observe whether it has been fired,
+  // and disguise it as the original getter.
+  var getter = getter_function(i);
+  error.__defineGetter__("stack", getter);
+
+  error = undefined;
+}
+
+gc();
+for (var i = 0; i < 100; i++) {
+  assertFalse(fired[i]);
+}
+
+// Error objects that are kept alive.
+var array = [];
+for (var i = 0; i < 100; i++) {
+  var error = new Error();
+  var getter = getter_function(i);
+  // Replace the getter to observe whether it has been fired,
+  // and disguise it as the original getter.
+  error.__defineGetter__("stack", getter);
+
+  array.push(error);
+  error = undefined;
+}
+
+gc();
+// We don't expect all stack traces to be formatted after only one GC.
+assertTrue(fired[0]);
+
+for (var i = 0; i < 10; i++) gc();
+for (var i = 0; i < 100; i++) assertTrue(fired[i]);
+
+// Error objects with custom stack getter.
+var custom_error = new Error();
+var custom_getter_fired = false;
+custom_error.__defineGetter__("stack",
+                              function() { custom_getter_fired = true; });
+gc();
+assertFalse(custom_getter_fired);
+
+// Check that formatting caused by GC is not somehow observable.
+var error;
+
+var obj = { foo: function foo() { throw new Error(); } };
+
+try {
+  obj.foo();
+} catch (e) {
+  delete obj.foo;
+  Object.defineProperty(obj, 'foo', {
+    get: function() { assertUnreachable(); }
+  });
+  error = e;
+}
+
+gc();
+
+Object.defineProperty(Array.prototype, '0', {
+  get: function() { assertUnreachable(); }
+});
+
+try {
+  throw new Error();
+} catch (e) {
+  error = e;
+}
+
+gc();
+
+String.prototype.indexOf = function() { assertUnreachable(); };
+String.prototype.lastIndexOf = function() { assertUnreachable(); };
+var obj = { method: function() { throw Error(); } };
+try {
+  obj.method();
+} catch (e) {
+  error = e;
+}
+
+gc();
diff --git a/test/mjsunit/stack-traces.js b/test/mjsunit/stack-traces.js
index 438eec9..b5d58fa 100644
--- a/test/mjsunit/stack-traces.js
+++ b/test/mjsunit/stack-traces.js
@@ -288,4 +288,42 @@
                    }, "QuickSort");
 
 // Omitted because ADD from runtime.js is non-native builtin.
-testOmittedBuiltin(function(){ thrower + 2; }, "ADD");
\ No newline at end of file
+testOmittedBuiltin(function(){ thrower + 2; }, "ADD");
+
+var error = new Error();
+error.toString = function() { assertUnreachable(); };
+error.stack;
+
+error = new Error();
+error.name = { toString: function() { assertUnreachable(); }};
+error.message = { toString: function() {  assertUnreachable(); }};
+error.stack;
+
+error = new Error();
+Array.prototype.push = function(x) { assertUnreachable(); };
+Array.prototype.join = function(x) { assertUnreachable(); };
+error.stack;
+
+var fired = false;
+error = new Error({ toString: function() { fired = true; } });
+assertTrue(fired);
+error.stack;
+assertTrue(fired);
+
+// Check that throwing an exception in a custom stack trace formatting function
+// does not lead to recursion.
+Error.prepareStackTrace = function() { throw new Error("abc"); };
+var message;
+try {
+  throw new Error();
+} catch (e) {
+  message = e.message;
+}
+
+assertEquals("abc", message);
+
+// Test that Error.prepareStackTrace replacing itself works.
+Error.prepareStackTrace = function() { Error.prepareStackTrace = "custom"; };
+new Error();
+
+assertEquals("custom", Error.prepareStackTrace);
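
The hook exercised above is V8's Error.prepareStackTrace API: when a stack
trace is formatted, V8 calls the hook with the error and an array of CallSite
objects, and whatever the hook returns becomes the value of the "stack"
property. A minimal sketch of ordinary use, illustrative rather than part of
the patch:

Error.prepareStackTrace = function(error, frames) {
  // frames are CallSite objects; getFunctionName() is part of their API.
  return frames.map(function(frame) { return frame.getFunctionName(); });
};
try { null.f(); } catch (e) { var frameNames = e.stack; }
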
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index aad07c7..046a72c 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -262,6 +262,7 @@
             '../../src/circular-queue.h',
             '../../src/code-stubs.cc',
             '../../src/code-stubs.h',
+            '../../src/code-stubs-hydrogen.cc',
             '../../src/code.h',
             '../../src/codegen.cc',
             '../../src/codegen.h',
diff --git a/tools/plot-timer-events.js b/tools/plot-timer-events.js
index 4b17e76..0a5e522 100644
--- a/tools/plot-timer-events.js
+++ b/tools/plot-timer-events.js
@@ -94,8 +94,8 @@
 }
 
 
-var xrange_start = Infinity;
-var xrange_end = 0;
+var xrange_start;
+var xrange_end;
 var obj_index = 0;
 var execution_pauses = [];
 var code_map = new CodeMap();
@@ -268,6 +268,38 @@
 }
 
 
+function FindPlotRange() {
+  var start_found = (xrange_start_override || xrange_start_override == 0);
+  var end_found = (xrange_end_override || xrange_end_override == 0);
+  xrange_start = start_found ? xrange_start_override : Infinity;
+  xrange_end = end_found ? xrange_end_override : -Infinity;
+
+  if (start_found && end_found) return;
+
+  var execution_ranges = kExecutionEvent.ranges;
+  for (var i = 0; i < execution_ranges.length; i++) {
+    if (execution_ranges[i].start < xrange_start && !start_found) {
+      xrange_start = execution_ranges[i].start;
+    }
+    if (execution_ranges[i].end > xrange_end && !end_found) {
+      xrange_end = execution_ranges[i].end;
+    }
+  }
+
+  for (var codekind in CodeKinds) {
+    var ticks = CodeKinds[codekind].in_execution;
+    for (var i = 0; i < ticks.length; i++) {
+      if (ticks[i].tick < xrange_start && !start_found) {
+        xrange_start = ticks[i].tick;
+      }
+      if (ticks[i].tick > xrange_end && !end_found) {
+        xrange_end = ticks[i].tick;
+      }
+    }
+  }
+}
+
+
 function CollectData() {
   // Collect data from log.
   var logreader = new LogReader(
@@ -297,17 +329,6 @@
 
   Undistort();
 
-  // Figure out plot range.
-  var execution_ranges = kExecutionEvent.ranges;
-  for (var i = 0; i < execution_ranges.length; i++) {
-    if (execution_ranges[i].start < xrange_start) {
-      xrange_start = execution_ranges[i].start;
-    }
-    if (execution_ranges[i].end > xrange_end) {
-      xrange_end = execution_ranges[i].end;
-    }
-  }
-
   // Collect execution pauses.
   for (name in TimerEvents) {
     var event = TimerEvents[name];
@@ -466,10 +487,8 @@
 
 
 function GnuplotOutput() {
-  xrange_start = (xrange_start_override || xrange_start_override == 0)
-                     ? xrange_start_override : xrange_start;
-  xrange_end = (xrange_end_override || xrange_end_override == 0)
-                   ? xrange_end_override : xrange_end;
+  FindPlotRange();
+
   print("set terminal pngcairo size " + kResX + "," + kResY +
         " enhanced font 'Helvetica,10'");
   print("set yrange [0:" + (num_timer_event + 1) + "]");
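
The extracted FindPlotRange keeps the previous override semantics: an
explicit 0 counts as set, which is why plain truthiness is not enough, and
only the endpoints that were not overridden are computed from execution
ranges and in-execution ticks. A small illustration of the override check,
not part of the patch:

var override = 0;
print(!!override);                     // false: truthiness calls 0 "unset"
print(!!(override || override == 0));  // true: 0 is correctly detected
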