Version 3.20.8

Deprecated v8::V8::Pause/ResumeProfiler.

Fixed Chromium issues 247688, 258519 and 260203.

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@15845 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
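
For embedders tracking this deprecation: the replacement is the CpuProfiler API. A minimal migration sketch, assuming the 3.20-era instance API reached through Isolate::GetCpuProfiler() (names should be verified against this revision's include/v8.h; the snippet is not part of the patch):

    #include <v8.h>

    // Sketch: start/stop a named CPU profile instead of resuming/pausing
    // the global profiler via the deprecated V8::Resume/PauseProfiler().
    void ProfileSection(v8::Isolate* isolate) {
      v8::HandleScope scope(isolate);
      v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
      v8::Handle<v8::String> title = v8::String::New("section");
      profiler->StartCpuProfiling(title);
      // ... run the code to be profiled ...
      const v8::CpuProfile* profile = profiler->StopCpuProfiling(title);
      (void) profile;  // inspect or serialize; the profiler owns it
    }
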
diff --git a/ChangeLog b/ChangeLog
index e05d039..3effe1a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,12 @@
+2013-07-24: Version 3.20.8
+
+        Deprecated v8::V8::Pause/ResumeProfiler.
+
+        Fixed Chromium issues 247688, 258519 and 260203.
+
+        Performance and stability improvements on all platforms.
+
+
 2013-07-22: Version 3.20.7
 
         Deprecated some debugger methods.
diff --git a/include/v8.h b/include/v8.h
index 9ce0583..7b4a5b2 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -4545,18 +4545,18 @@
    * See also the --prof and --prof_auto command line switches to
    * enable V8 profiling.
    */
-  static void PauseProfiler();
+  V8_DEPRECATED(static void PauseProfiler());
 
   /**
    * Resumes recording of tick samples in the profiler.
    * See also PauseProfiler().
    */
-  static void ResumeProfiler();
+  V8_DEPRECATED(static void ResumeProfiler());
 
   /**
    * Return whether profiler is currently paused.
    */
-  static bool IsProfilerPaused();
+  V8_DEPRECATED(static bool IsProfilerPaused());
 
   /**
    * Retrieve the V8 thread id of the calling thread.
@@ -5412,7 +5412,7 @@
   static const int kJSObjectType = 0xb1;
   static const int kFirstNonstringType = 0x80;
   static const int kOddballType = 0x83;
-  static const int kForeignType = 0x88;
+  static const int kForeignType = 0x87;
 
   static const int kUndefinedOddballKind = 5;
   static const int kNullOddballKind = 3;
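
Note that V8_DEPRECATED(declarator) expands to the bare declarator unless the embedder defines it before including v8.h, so the three declarations above only produce warnings for embedders that opt in. A hypothetical opt-in for GCC/Clang (verify the macro's default against this revision's v8.h):

    // Define before including v8.h so deprecated APIs emit warnings.
    #define V8_DEPRECATED(declarator) declarator __attribute__((deprecated))
    #include <v8.h>
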
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 7773667..f45c75b 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -38,6 +38,16 @@
 namespace internal {
 
 
+void ToNumberStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { r0 };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ = NULL;
+}
+
+
 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
@@ -286,17 +296,6 @@
                                            Register rhs);
 
 
-// Check if the operand is a heap number.
-static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
-                                   Register scratch1, Register scratch2,
-                                   Label* not_a_heap_number) {
-  __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
-  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
-  __ cmp(scratch1, scratch2);
-  __ b(ne, not_a_heap_number);
-}
-
-
 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
   // Update the static counter each time a new code stub is generated.
   Isolate* isolate = masm->isolate();
@@ -321,22 +320,6 @@
 }
 
 
-void ToNumberStub::Generate(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in eax.
-  Label check_heap_number, call_builtin;
-  __ JumpIfNotSmi(r0, &check_heap_number);
-  __ Ret();
-
-  __ bind(&check_heap_number);
-  EmitCheckForHeapNumber(masm, r0, r1, ip, &call_builtin);
-  __ Ret();
-
-  __ bind(&call_builtin);
-  __ push(r0);
-  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
-}
-
-
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // Create a new closure from the given function info in new
   // space. Set the context to the current context in cp.
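
With the hand-written Generate() gone (see the deletion above), the stub's body now comes from the Hydrogen graph in code-stubs-hydrogen.cc; each port only declares where the argument arrives. For comparison, a hypothetical ia32 counterpart of the descriptor above would differ only in the register name (eax being the conventional ia32 argument register for this stub):

    void ToNumberStub::InitializeInterfaceDescriptor(
        Isolate* isolate,
        CodeStubInterfaceDescriptor* descriptor) {
      static Register registers[] = { eax };
      descriptor->register_param_count_ = 1;
      descriptor->register_params_ = registers;
      descriptor->deoptimization_handler_ = NULL;
    }
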
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 780bafb..363ea0c 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -635,6 +635,17 @@
   __ bind(&done);
 }
 
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+  SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+  SetFrameSlot(offset, value);
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index b68d22f..a84045b 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -701,11 +701,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
-  return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
 LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
   return AssignEnvironment(new(zone()) LDeoptimize);
 }
@@ -2035,9 +2030,14 @@
 
 
 LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
-  LUnallocated* temp1 = TempRegister();
-  LOperand* temp2 = TempRegister();
+  LUnallocated* temp1 = NULL;
+  LOperand* temp2 = NULL;
+  if (!instr->CanOmitPrototypeChecks()) {
+    temp1 = TempRegister();
+    temp2 = TempRegister();
+  }
   LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
+  if (instr->CanOmitPrototypeChecks()) return result;
   return AssignEnvironment(result);
 }
 
@@ -2049,8 +2049,10 @@
 
 
 LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* value = NULL;
+  if (!instr->CanOmitMapChecks()) value = UseRegisterAtStart(instr->value());
   LInstruction* result = new(zone()) LCheckMaps(value);
+  if (instr->CanOmitMapChecks()) return result;
   return AssignEnvironment(result);
 }
 
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 5165f1b..2055e6a 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -40,12 +40,6 @@
 // Forward declarations.
 class LCodeGen;
 
-#define LITHIUM_ALL_INSTRUCTION_LIST(V)         \
-  V(ControlInstruction)                         \
-  V(Call)                                       \
-  LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
 #define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
   V(AccessArgumentsAt)                          \
   V(AddI)                                       \
@@ -73,6 +67,7 @@
   V(CheckInstanceType)                          \
   V(CheckNonSmi)                                \
   V(CheckMaps)                                  \
+  V(CheckMapValue)                              \
   V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
   V(ClampDToUint8)                              \
@@ -89,14 +84,18 @@
   V(ConstantS)                                  \
   V(ConstantT)                                  \
   V(Context)                                    \
+  V(DateField)                                  \
   V(DebugBreak)                                 \
   V(DeclareGlobals)                             \
   V(Deoptimize)                                 \
   V(DivI)                                       \
   V(DoubleToI)                                  \
   V(DoubleToSmi)                                \
+  V(Drop)                                       \
   V(DummyUse)                                   \
   V(ElementsKind)                               \
+  V(ForInCacheArray)                            \
+  V(ForInPrepareMap)                            \
   V(FunctionLiteral)                            \
   V(GetCachedArrayIndex)                        \
   V(GlobalObject)                               \
@@ -104,13 +103,13 @@
   V(Goto)                                       \
   V(HasCachedArrayIndexAndBranch)               \
   V(HasInstanceTypeAndBranch)                   \
+  V(InnerAllocatedObject)                       \
   V(InstanceOf)                                 \
   V(InstanceOfKnownGlobal)                      \
   V(InstanceSize)                               \
   V(InstructionGap)                             \
   V(Integer32ToDouble)                          \
   V(Integer32ToSmi)                             \
-  V(Uint32ToDouble)                             \
   V(InvokeFunction)                             \
   V(IsConstructCallAndBranch)                   \
   V(IsObjectAndBranch)                          \
@@ -123,6 +122,7 @@
   V(LinkObjectInList)                           \
   V(LoadContextSlot)                            \
   V(LoadExternalArrayPointer)                   \
+  V(LoadFieldByIndex)                           \
   V(LoadFunctionPrototype)                      \
   V(LoadGlobalCell)                             \
   V(LoadGlobalGeneric)                          \
@@ -187,16 +187,10 @@
   V(TrapAllocationMemento)                      \
   V(Typeof)                                     \
   V(TypeofIsAndBranch)                          \
+  V(Uint32ToDouble)                             \
   V(UnknownOSRValue)                            \
   V(ValueOf)                                    \
-  V(ForInPrepareMap)                            \
-  V(ForInCacheArray)                            \
-  V(CheckMapValue)                              \
-  V(LoadFieldByIndex)                           \
-  V(DateField)                                  \
-  V(WrapReceiver)                               \
-  V(Drop)                                       \
-  V(InnerAllocatedObject)
+  V(WrapReceiver)
 
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
@@ -433,6 +427,7 @@
 class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+  DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
 };
 
 
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 89eb8c8..fab5a1b 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -810,12 +810,6 @@
 }
 
 
-void LCodeGen::SoftDeoptimize(LEnvironment* environment) {
-  ASSERT(!info()->IsStub());
-  DeoptimizeIf(al, environment, Deoptimizer::SOFT);
-}
-
-
 void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
   ZoneList<Handle<Map> > maps(1, zone());
   int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
@@ -5232,6 +5226,7 @@
 
 
 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+  if (instr->hydrogen()->CanOmitMapChecks()) return;
   Register map_reg = scratch0();
   LOperand* input = instr->value();
   ASSERT(input->IsRegister());
@@ -5304,6 +5299,8 @@
 
 
 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+  if (instr->hydrogen()->CanOmitPrototypeChecks()) return;
+
   Register prototype_reg = ToRegister(instr->temp());
   Register map_reg = ToRegister(instr->temp2());
 
@@ -5312,12 +5309,10 @@
 
   ASSERT(prototypes->length() == maps->length());
 
-  if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
-    for (int i = 0; i < prototypes->length(); i++) {
-      __ LoadHeapObject(prototype_reg, prototypes->at(i));
-      __ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
-      DoCheckMapCommon(map_reg, maps->at(i), instr->environment());
-    }
+  for (int i = 0; i < prototypes->length(); i++) {
+    __ LoadHeapObject(prototype_reg, prototypes->at(i));
+    __ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
+    DoCheckMapCommon(map_reg, maps->at(i), instr->environment());
   }
 }
 
@@ -5366,6 +5361,25 @@
   }
 
   __ bind(deferred->exit());
+
+  if (instr->hydrogen()->MustPrefillWithFiller()) {
+    if (instr->size()->IsConstantOperand()) {
+      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+      __ mov(scratch, Operand(size));
+    } else {
+      scratch = ToRegister(instr->size());
+    }
+    __ sub(scratch, scratch, Operand(kPointerSize));
+    __ sub(result, result, Operand(kHeapObjectTag));
+    Label loop;
+    __ bind(&loop);
+    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+    __ str(scratch2, MemOperand(result, scratch));
+    __ sub(scratch, scratch, Operand(kPointerSize));
+    __ cmp(scratch, Operand::Zero());
+    __ b(ge, &loop);
+    __ add(result, result, Operand(kHeapObjectTag));
+  }
 }
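
The MustPrefillWithFiller() path above writes the one-pointer filler map into every word of the fresh allocation, walking from the last word down to offset 0, so the heap verifier can parse the object body before the caller initializes its real fields. A self-contained toy model of the loop's arithmetic (plain C++, hypothetical helper, not part of the patch):

    #include <cstddef>
    #include <cstdint>

    // Mirrors the ARM loop: start at size - kPointerSize, step down one
    // word at a time, and stop once the offset goes negative (cmp/b ge).
    void PrefillWithFiller(uintptr_t* object, size_t size_in_bytes,
                           uintptr_t one_pointer_filler_map) {
      for (ptrdiff_t offset =
               static_cast<ptrdiff_t>(size_in_bytes - sizeof(uintptr_t));
           offset >= 0;
           offset -= static_cast<ptrdiff_t>(sizeof(uintptr_t))) {
        object[offset / sizeof(uintptr_t)] = one_pointer_filler_map;
      }
    }
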
 
 
@@ -5626,11 +5640,15 @@
 
 
 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
-  if (instr->hydrogen_value()->IsSoftDeoptimize()) {
-    SoftDeoptimize(instr->environment());
-  } else {
-    DeoptimizeIf(al, instr->environment());
+  Deoptimizer::BailoutType type = instr->hydrogen()->type();
+  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons
+  // (they need the return address), even though the implementation of LAZY
+  // and EAGER is now identical. When LAZY is eventually completely folded
+  // into EAGER, remove the special case below.
+  if (info()->IsStub() && type == Deoptimizer::EAGER) {
+    type = Deoptimizer::LAZY;
   }
+  DeoptimizeIf(al, instr->environment(), type);
 }
 
 
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index b0390ee..dac3ffe 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -284,7 +284,6 @@
                     LEnvironment* environment,
                     Deoptimizer::BailoutType bailout_type);
   void DeoptimizeIf(Condition cc, LEnvironment* environment);
-  void SoftDeoptimize(LEnvironment* environment);
 
   void AddToTranslation(Translation* translation,
                         LOperand* op,
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 8416926..ba80cda 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1625,6 +1625,7 @@
                               Register scratch2,
                               Label* gc_required,
                               AllocationFlags flags) {
+  ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -3260,9 +3261,10 @@
     Register scratch1,
     Register scratch2,
     Label* failure) {
-  int kFlatAsciiStringMask =
+  const int kFlatAsciiStringMask =
       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+  const int kFlatAsciiStringTag =
+      kStringTag | kOneByteStringTag | kSeqStringTag;
   and_(scratch1, first, Operand(kFlatAsciiStringMask));
   and_(scratch2, second, Operand(kFlatAsciiStringMask));
   cmp(scratch1, Operand(kFlatAsciiStringTag));
@@ -3275,9 +3277,10 @@
 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                                             Register scratch,
                                                             Label* failure) {
-  int kFlatAsciiStringMask =
+  const int kFlatAsciiStringMask =
       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+  const int kFlatAsciiStringTag =
+      kStringTag | kOneByteStringTag | kSeqStringTag;
   and_(scratch, type, Operand(kFlatAsciiStringMask));
   cmp(scratch, Operand(kFlatAsciiStringTag));
   b(ne, failure);
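
In both hunks above, the expected tag is now composed from exactly the bits that kFlatAsciiStringMask keeps. A plausible motivation, worth verifying against this revision's objects.h: ASCII_STRING_TYPE also carries the (not-)internalized bit, which the mask clears, and an AND-then-compare can never succeed when the expected constant contains a bit the mask has already cleared:

    // value & (kIsNotStringMask | kStringEncodingMask |
    //          kStringRepresentationMask)
    //     == kStringTag | kOneByteStringTag | kSeqStringTag
    // Every bit of the right-hand side must survive the mask on the left,
    // otherwise the equality is unsatisfiable for all inputs.
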
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 324dfa9..1b341b5 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -178,7 +178,7 @@
   AddInstruction(context_);
   start_environment->BindContext(context_);
 
-  AddSimulate(BailoutId::StubEntry());
+  Add<HSimulate>(BailoutId::StubEntry());
 
   NoObservableSideEffectsScope no_effects(this);
 
@@ -307,6 +307,37 @@
 
 
 template <>
+HValue* CodeStubGraphBuilder<ToNumberStub>::BuildCodeStub() {
+  HValue* value = GetParameter(0);
+
+  // Check if the parameter is already a Smi or a heap number.
+  IfBuilder if_number(this);
+  if_number.If<HIsSmiAndBranch>(value);
+  if_number.OrIf<HCompareMap>(value, isolate()->factory()->heap_number_map());
+  if_number.Then();
+
+  // Return the number.
+  Push(value);
+
+  if_number.Else();
+
+  // Convert the parameter to number using the builtin.
+  HValue* function = AddLoadJSBuiltin(Builtins::TO_NUMBER, context());
+  Add<HPushArgument>(value);
+  Push(Add<HInvokeFunction>(context(), function, 1));
+
+  if_number.End();
+
+  return Pop();
+}
+
+
+Handle<Code> ToNumberStub::GenerateCode() {
+  return DoGenerateCode(this);
+}
+
+
+template <>
 HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
   Zone* zone = this->zone();
   Factory* factory = isolate()->factory();
@@ -366,9 +397,10 @@
                                                length));
   }
 
-  HValue* result = environment()->Pop();
   checker.ElseDeopt();
-  return result;
+  checker.End();
+
+  return environment()->Pop();
 }
 
 
@@ -416,8 +448,11 @@
     AddStore(object, access, AddLoad(boilerplate, access));
   }
 
+  environment()->Push(object);
   checker.ElseDeopt();
-  return object;
+  checker.End();
+
+  return environment()->Pop();
 }
 
 
@@ -850,23 +885,26 @@
   HParameter* receiver = GetParameter(0);
   HParameter* value = GetParameter(2);
 
+  // Check that the map of the global has not changed: use a placeholder map
+  // that will be replaced later with the global object's map.
+  Handle<Map> placeholder_map = isolate()->factory()->meta_map();
+  AddInstruction(HCheckMaps::New(
+      receiver, placeholder_map, zone(), top_info()));
+
+  HValue* cell = Add<HConstant>(placeholder_cell, Representation::Tagged());
+  HObjectAccess access(HObjectAccess::ForCellPayload(isolate()));
+  HValue* cell_contents = Add<HLoadNamedField>(cell, access);
+
   if (stub->is_constant()) {
-    // Assume every store to a constant value changes it.
-    current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
-    set_current_block(NULL);
+    IfBuilder builder(this);
+    builder.If<HCompareObjectEqAndBranch>(cell_contents, value);
+    builder.Then();
+    builder.ElseDeopt();
+    builder.End();
   } else {
-    HValue* cell = Add<HConstant>(placeholder_cell, Representation::Tagged());
-
-    // Check that the map of the global has not changed: use a placeholder map
-    // that will be replaced later with the global object's map.
-    Handle<Map> placeholder_map = isolate()->factory()->meta_map();
-    AddInstruction(HCheckMaps::New(receiver, placeholder_map, zone()));
-
     // Load the payload of the global parameter cell. A hole indicates that the
     // property has been deleted and that the store must be handled by the
     // runtime.
-    HObjectAccess access(HObjectAccess::ForCellPayload(isolate()));
-    HValue* cell_contents = Add<HLoadNamedField>(cell, access);
     IfBuilder builder(this);
     HValue* hole_value = Add<HConstant>(hole, Representation::Tagged());
     builder.If<HCompareObjectEqAndBranch>(cell_contents, hole_value);
@@ -876,6 +914,7 @@
     Add<HStoreNamedField>(cell, access, value);
     builder.End();
   }
+
   return value;
 }
 
@@ -894,8 +933,7 @@
 
   if (FLAG_trace_elements_transitions) {
     // Tracing elements transitions is the job of the runtime.
-    current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
-    set_current_block(NULL);
+    Add<HDeoptimize>(Deoptimizer::EAGER);
   } else {
     info()->MarkAsSavesCallerDoubles();
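
A recurring fix in this file: a value that must survive an IfBuilder join has to travel through the simulated environment rather than a C++ local, and it may only be popped after End() merges the environments of the two arms. Condensed as a sketch of the builder protocol (not literal source):

    //   environment()->Push(object);  // keep the value live on both arms
    //   checker.ElseDeopt();          // the else-arm deoptimizes
    //   checker.End();                // join point; environments merge
    //   return environment()->Pop();  // pop after the merge, not before
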
 
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 3359354..bc581d8 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -474,15 +474,19 @@
 };
 
 
-class ToNumberStub: public PlatformCodeStub {
+class ToNumberStub: public HydrogenCodeStub {
  public:
   ToNumberStub() { }
 
-  void Generate(MacroAssembler* masm);
+  virtual Handle<Code> GenerateCode();
+
+  virtual void InitializeInterfaceDescriptor(
+      Isolate* isolate,
+      CodeStubInterfaceDescriptor* descriptor);
 
  private:
   Major MajorKey() { return ToNumber; }
-  int MinorKey() { return 0; }
+  int NotMissMinorKey() { return 0; }
 };
 
 
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index d3fadb5..0d226cf 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -264,7 +264,7 @@
                                   Code* code,
                                   SharedFunctionInfo* shared,
                                   CompilationInfo* info,
-                                  String* source, int line) {
+                                  Name* source, int line) {
   if (FilterOutCodeCreateEvent(tag)) return;
   CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index 44e63fe..66e2b8b 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -173,14 +173,14 @@
 };
 
 
-#define PROFILE(IsolateGetter, Call)                                   \
-  do {                                                                 \
-    Isolate* cpu_profiler_isolate = (IsolateGetter);                   \
-    LOG_CODE_EVENT(cpu_profiler_isolate, Call);                        \
-    CpuProfiler* cpu_profiler = cpu_profiler_isolate->cpu_profiler();  \
-    if (cpu_profiler->is_profiling()) {                                \
-      cpu_profiler->Call;                                              \
-    }                                                                  \
+#define PROFILE(IsolateGetter, Call)                                        \
+  do {                                                                      \
+    Isolate* cpu_profiler_isolate = (IsolateGetter);                        \
+    v8::internal::Logger* logger = cpu_profiler_isolate->logger();          \
+    CpuProfiler* cpu_profiler = cpu_profiler_isolate->cpu_profiler();       \
+    if (logger->is_logging_code_events() || cpu_profiler->is_profiling()) { \
+      logger->Call;                                                         \
+    }                                                                       \
   } while (false)
 
 
@@ -223,7 +223,7 @@
                        Code* code,
                        SharedFunctionInfo* shared,
                        CompilationInfo* info,
-                       String* source, int line);
+                       Name* source, int line);
   void CodeCreateEvent(Logger::LogEventsAndTags tag,
                        Code* code, int args_count);
   void CodeMovingGCEvent() {}
diff --git a/src/d8.cc b/src/d8.cc
index e576e9c..5343174 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -457,16 +457,6 @@
 }
 
 
-void Shell::EnableProfiler(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  V8::ResumeProfiler();
-}
-
-
-void Shell::DisableProfiler(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  V8::PauseProfiler();
-}
-
-
 void Shell::Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
   String::Utf8Value file(args[0]);
   if (*file == NULL) {
@@ -857,10 +847,6 @@
   global_template->Set(String::New("load"), FunctionTemplate::New(Load));
   global_template->Set(String::New("quit"), FunctionTemplate::New(Quit));
   global_template->Set(String::New("version"), FunctionTemplate::New(Version));
-  global_template->Set(String::New("enableProfiler"),
-                       FunctionTemplate::New(EnableProfiler));
-  global_template->Set(String::New("disableProfiler"),
-                       FunctionTemplate::New(DisableProfiler));
 
   // Bind the Realm object.
   Handle<ObjectTemplate> realm_template = ObjectTemplate::New();
diff --git a/src/d8.h b/src/d8.h
index 804cc46..4f04342 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -317,8 +317,6 @@
   static void Write(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void Quit(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void Version(const v8::FunctionCallbackInfo<v8::Value>& args);
-  static void EnableProfiler(const v8::FunctionCallbackInfo<v8::Value>& args);
-  static void DisableProfiler(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void Read(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args);
   static Handle<String> ReadFromStdin(Isolate* isolate);
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index 88efbe2..a588b4c 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -1469,8 +1469,6 @@
         this.suspendRequest_(request, response);
       } else if (request.command == 'version') {
         this.versionRequest_(request, response);
-      } else if (request.command == 'profile') {
-        this.profileRequest_(request, response);
       } else if (request.command == 'changelive') {
         this.changeLiveRequest_(request, response);
       } else if (request.command == 'restartframe') {
@@ -2400,18 +2398,6 @@
 };
 
 
-DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
-  if (request.arguments.command == 'resume') {
-    %ProfilerResume();
-  } else if (request.arguments.command == 'pause') {
-    %ProfilerPause();
-  } else {
-    return response.failed('Unknown command');
-  }
-  response.body = {};
-};
-
-
 DebugCommandProcessor.prototype.changeLiveRequest_ = function(
     request, response) {
   if (!request.arguments) {
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index fd7c282..72cf3c0 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -900,15 +900,15 @@
   // input frame.  For all subsequent output frames, it can be read from the
   // previous one.  This frame's pc can be computed from the non-optimized
   // function code and AST id of the bailout.
-  output_offset -= kPointerSize;
-  input_offset -= kPointerSize;
+  output_offset -= kPCOnStackSize;
+  input_offset -= kPCOnStackSize;
   intptr_t value;
   if (is_bottommost) {
     value = input_->GetFrameSlot(input_offset);
   } else {
     value = output_[frame_index - 1]->GetPc();
   }
-  output_frame->SetFrameSlot(output_offset, value);
+  output_frame->SetCallerPc(output_offset, value);
   if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
            V8PRIxPTR  " ; caller's pc\n",
@@ -919,14 +919,14 @@
   // as in the input frame.  For all subsequent output frames, it can be
   // read from the previous one.  Also compute and set this frame's frame
   // pointer.
-  output_offset -= kPointerSize;
-  input_offset -= kPointerSize;
+  output_offset -= kFPOnStackSize;
+  input_offset -= kFPOnStackSize;
   if (is_bottommost) {
     value = input_->GetFrameSlot(input_offset);
   } else {
     value = output_[frame_index - 1]->GetFp();
   }
-  output_frame->SetFrameSlot(output_offset, value);
+  output_frame->SetCallerFp(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
   ASSERT(!is_bottommost || (input_->GetRegister(fp_reg.code()) +
       has_alignment_padding_ * kPointerSize) == fp_value);
@@ -1049,9 +1049,9 @@
   }
 
   // Read caller's PC from the previous frame.
-  output_offset -= kPointerSize;
+  output_offset -= kPCOnStackSize;
   intptr_t callers_pc = output_[frame_index - 1]->GetPc();
-  output_frame->SetFrameSlot(output_offset, callers_pc);
+  output_frame->SetCallerPc(output_offset, callers_pc);
   if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
            V8PRIxPTR " ; caller's pc\n",
@@ -1059,9 +1059,9 @@
   }
 
   // Read caller's FP from the previous frame, and set this frame's FP.
-  output_offset -= kPointerSize;
+  output_offset -= kFPOnStackSize;
   intptr_t value = output_[frame_index - 1]->GetFp();
-  output_frame->SetFrameSlot(output_offset, value);
+  output_frame->SetCallerFp(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
   output_frame->SetFp(fp_value);
   if (trace_) {
@@ -1152,9 +1152,9 @@
   }
 
   // Read caller's PC from the previous frame.
-  output_offset -= kPointerSize;
+  output_offset -= kPCOnStackSize;
   intptr_t callers_pc = output_[frame_index - 1]->GetPc();
-  output_frame->SetFrameSlot(output_offset, callers_pc);
+  output_frame->SetCallerPc(output_offset, callers_pc);
   if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
            V8PRIxPTR " ; caller's pc\n",
@@ -1162,9 +1162,9 @@
   }
 
   // Read caller's FP from the previous frame, and set this frame's FP.
-  output_offset -= kPointerSize;
+  output_offset -= kFPOnStackSize;
   intptr_t value = output_[frame_index - 1]->GetFp();
-  output_frame->SetFrameSlot(output_offset, value);
+  output_frame->SetCallerFp(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
   output_frame->SetFp(fp_value);
   if (trace_) {
@@ -1265,7 +1265,9 @@
   // MacroAssembler::EnterFrame). For a setter stub frame we need one additional
   // entry for the implicit return value, see
   // StoreStubCompiler::CompileStoreViaSetter.
-  unsigned fixed_frame_entries = 1 + 4 + (is_setter_stub_frame ? 1 : 0);
+  unsigned fixed_frame_entries = (kPCOnStackSize / kPointerSize) +
+                                 (kFPOnStackSize / kPointerSize) + 3 +
+                                 (is_setter_stub_frame ? 1 : 0);
   unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
   unsigned output_frame_size = height_in_bytes + fixed_frame_size;
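
A quick check on the rewritten entry count: with kPCOnStackSize == kFPOnStackSize == kPointerSize (true for every port in this tree), the new expression gives 1 + 1 + 3 = 5 entries, identical to the old literal 1 + 4 (return pc, plus fp, context, frame type and code object); the rewrite only makes the pc and fp contributions explicit for a future target where the on-stack slot sizes differ from kPointerSize.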
 
@@ -1287,9 +1289,9 @@
   unsigned output_offset = output_frame_size;
 
   // Read caller's PC from the previous frame.
-  output_offset -= kPointerSize;
+  output_offset -= kPCOnStackSize;
   intptr_t callers_pc = output_[frame_index - 1]->GetPc();
-  output_frame->SetFrameSlot(output_offset, callers_pc);
+  output_frame->SetCallerPc(output_offset, callers_pc);
   if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; caller's pc\n",
@@ -1297,9 +1299,9 @@
   }
 
   // Read caller's FP from the previous frame, and set this frame's FP.
-  output_offset -= kPointerSize;
+  output_offset -= kFPOnStackSize;
   intptr_t value = output_[frame_index - 1]->GetFp();
-  output_frame->SetFrameSlot(output_offset, value);
+  output_frame->SetCallerFp(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
   output_frame->SetFp(fp_value);
   if (trace_) {
@@ -1435,10 +1437,10 @@
   output_frame->SetTop(top_address);
 
   // Read caller's PC (JSFunction continuation) from the input frame.
-  unsigned input_frame_offset = input_frame_size - kPointerSize;
-  unsigned output_frame_offset = output_frame_size - kPointerSize;
+  unsigned input_frame_offset = input_frame_size - kPCOnStackSize;
+  unsigned output_frame_offset = output_frame_size - kPCOnStackSize;
   intptr_t value = input_->GetFrameSlot(input_frame_offset);
-  output_frame->SetFrameSlot(output_frame_offset, value);
+  output_frame->SetCallerPc(output_frame_offset, value);
   if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
            V8PRIxPTR " ; caller's pc\n",
@@ -1446,10 +1448,10 @@
   }
 
   // Read caller's FP from the input frame, and set this frame's FP.
-  input_frame_offset -= kPointerSize;
+  input_frame_offset -= kFPOnStackSize;
   value = input_->GetFrameSlot(input_frame_offset);
-  output_frame_offset -= kPointerSize;
-  output_frame->SetFrameSlot(output_frame_offset, value);
+  output_frame_offset -= kFPOnStackSize;
+  output_frame->SetCallerFp(output_frame_offset, value);
   intptr_t frame_ptr = input_->GetRegister(fp_reg.code());
   output_frame->SetRegister(fp_reg.code(), frame_ptr);
   output_frame->SetFp(frame_ptr);
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index d28be23..b29c1cb 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -510,6 +510,10 @@
     *GetFrameSlotPointer(offset) = value;
   }
 
+  void SetCallerPc(unsigned offset, intptr_t value);
+
+  void SetCallerFp(unsigned offset, intptr_t value);
+
   intptr_t GetRegister(unsigned n) const {
     ASSERT(n < ARRAY_SIZE(registers_));
     return registers_[n];
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 63cf663..1b0e33c 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -307,6 +307,9 @@
 DEFINE_bool(omit_prototype_checks_for_leaf_maps, true,
             "do not emit prototype checks if all prototypes have leaf maps, "
             "deoptimize the optimized code if the layout of the maps changes.")
+DEFINE_bool(omit_map_checks_for_leaf_maps, true,
+            "do not emit check maps for constant values that have a leaf map, "
+            "deoptimize the optimized code if the layout of the maps changes.")
 
 // Experimental profiler changes.
 DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
diff --git a/src/frames-inl.h b/src/frames-inl.h
index d097ed1..2b15bff 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -334,10 +334,10 @@
 }
 
 
-inline JavaScriptFrame* SafeStackFrameIterator::frame() const {
+inline StackFrame* SafeStackFrameIterator::frame() const {
   ASSERT(!done());
-  ASSERT(frame_->is_java_script());
-  return static_cast<JavaScriptFrame*>(frame_);
+  ASSERT(frame_->is_java_script() || frame_->is_exit());
+  return frame_;
 }
 
 
diff --git a/src/frames.cc b/src/frames.cc
index 890e77a..61792a6 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -36,6 +36,7 @@
 #include "safepoint-table.h"
 #include "scopeinfo.h"
 #include "string-stream.h"
+#include "vm-state-inl.h"
 
 #include "allocation-inl.h"
 
@@ -221,7 +222,8 @@
     : StackFrameIteratorBase(isolate, false),
       low_bound_(sp),
       high_bound_(js_entry_sp),
-      top_frame_type_(StackFrame::NONE) {
+      top_frame_type_(StackFrame::NONE),
+      external_callback_scope_(isolate->external_callback_scope()) {
   StackFrame::State state;
   StackFrame::Type type;
   ThreadLocalTop* top = isolate->thread_local_top();
@@ -256,16 +258,28 @@
   }
   if (SingletonFor(type) == NULL) return;
   frame_ = SingletonFor(type, &state);
+  if (frame_ == NULL) return;
 
-  if (!done()) Advance();
+  Advance();
+
+  if (frame_ != NULL && !frame_->is_exit() &&
+      external_callback_scope_ != NULL &&
+      external_callback_scope_->scope_address() < frame_->fp()) {
+    // Skip top ExternalCallbackScope if we already advanced to a JS frame
+      // under it. The sampler will pick up this top external callback anyway.
+    external_callback_scope_ = external_callback_scope_->previous();
+  }
 }
 
 
 bool SafeStackFrameIterator::IsValidTop(ThreadLocalTop* top) const {
-  Address fp = Isolate::c_entry_fp(top);
-  if (!IsValidExitFrame(fp)) return false;
+  Address c_entry_fp = Isolate::c_entry_fp(top);
+  if (!IsValidExitFrame(c_entry_fp)) return false;
   // There should be at least one JS_ENTRY stack handler.
-  return Isolate::handler(top) != NULL;
+  Address handler = Isolate::handler(top);
+  if (handler == NULL) return false;
+  // Check that there are no js frames on top of the native frames.
+  return c_entry_fp < handler;
 }
 
 
@@ -340,6 +354,24 @@
     AdvanceOneFrame();
     if (done()) return;
     if (frame_->is_java_script()) return;
+    if (frame_->is_exit() && external_callback_scope_) {
+      // Some of the EXIT frames may have ExternalCallbackScope allocated on
+      // top of them. In that case the scope corresponds to the first EXIT
+      // frame beneath it. There may be other EXIT frames on top of the
+      // ExternalCallbackScope; just skip them, as we cannot collect any useful
+      // information about them.
+      if (external_callback_scope_->scope_address() < frame_->fp()) {
+        Address* callback_address =
+            external_callback_scope_->callback_address();
+        if (*callback_address != NULL) {
+          frame_->state_.pc_address = callback_address;
+        }
+        external_callback_scope_ = external_callback_scope_->previous();
+        ASSERT(external_callback_scope_ == NULL ||
+               external_callback_scope_->scope_address() > frame_->fp());
+        return;
+      }
+    }
   }
 }
 
@@ -540,7 +572,7 @@
   state->sp = sp;
   state->fp = fp;
   state->pc_address = ResolveReturnAddressLocation(
-      reinterpret_cast<Address*>(sp - 1 * kPointerSize));
+      reinterpret_cast<Address*>(sp - 1 * kPCOnStackSize));
 }
 
 
diff --git a/src/frames.h b/src/frames.h
index 7e667a6..634ff8a 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -47,6 +47,7 @@
 
 
 // Forward declarations.
+class ExternalCallbackScope;
 class StackFrameIteratorBase;
 class ThreadLocalTop;
 class Isolate;
@@ -92,7 +93,7 @@
   static const int kContextOffset  = 3 * kPointerSize;
   static const int kFPOffset       = 4 * kPointerSize;
 
-  static const int kSize = kFPOffset + kPointerSize;
+  static const int kSize = kFPOffset + kFPOnStackSize;
   static const int kSlotCount = kSize >> kPointerSizeLog2;
 };
 
@@ -168,13 +169,14 @@
   // context and function.
   // StandardFrame::IterateExpressions assumes that kContextOffset is the last
   // object pointer.
-  static const int kFixedFrameSize    =  4 * kPointerSize;
+  static const int kFixedFrameSize    =  kPCOnStackSize + kFPOnStackSize +
+                                         2 * kPointerSize;
   static const int kExpressionsOffset = -3 * kPointerSize;
   static const int kMarkerOffset      = -2 * kPointerSize;
   static const int kContextOffset     = -1 * kPointerSize;
   static const int kCallerFPOffset    =  0 * kPointerSize;
-  static const int kCallerPCOffset    = +1 * kPointerSize;
-  static const int kCallerSPOffset    = +2 * kPointerSize;
+  static const int kCallerPCOffset    = +1 * kFPOnStackSize;
+  static const int kCallerSPOffset    = +2 * kPCOnStackSize;
 };
 
 
@@ -883,7 +885,7 @@
                          Address fp, Address sp,
                          Address js_entry_sp);
 
-  inline JavaScriptFrame* frame() const;
+  inline StackFrame* frame() const;
   void Advance();
 
   StackFrame::Type top_frame_type() const { return top_frame_type_; }
@@ -902,6 +904,7 @@
   const Address low_bound_;
   const Address high_bound_;
   StackFrame::Type top_frame_type_;
+  ExternalCallbackScope* external_callback_scope_;
 };
 
 
diff --git a/src/globals.h b/src/globals.h
index e695e94..26fd531 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -239,12 +239,15 @@
 
 const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
 
-const int kCharSize     = sizeof(char);      // NOLINT
-const int kShortSize    = sizeof(short);     // NOLINT
-const int kIntSize      = sizeof(int);       // NOLINT
-const int kDoubleSize   = sizeof(double);    // NOLINT
-const int kIntptrSize   = sizeof(intptr_t);  // NOLINT
-const int kPointerSize  = sizeof(void*);     // NOLINT
+const int kCharSize      = sizeof(char);      // NOLINT
+const int kShortSize     = sizeof(short);     // NOLINT
+const int kIntSize       = sizeof(int);       // NOLINT
+const int kDoubleSize    = sizeof(double);    // NOLINT
+const int kIntptrSize    = sizeof(intptr_t);  // NOLINT
+const int kPointerSize   = sizeof(void*);     // NOLINT
+const int kRegisterSize  = kPointerSize;
+const int kPCOnStackSize = kRegisterSize;
+const int kFPOnStackSize = kRegisterSize;
 
 const int kDoubleSizeLog2 = 3;
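
The three new constants are all kPointerSize today, so no generated code changes; the split is groundwork so that a target whose call instruction pushes something other than a pointer-sized slot only needs to redefine these constants instead of auditing every kPointerSize use in the frame and deoptimizer code. Purely illustrative values for such a hypothetical target (not a configuration this tree supports):

    const int kPointerSize   = 4;                 // 4-byte pointers
    const int kRegisterSize  = 2 * kPointerSize;  // 8-byte native registers
    const int kPCOnStackSize = kRegisterSize;     // call pushes a full register
    const int kFPOnStackSize = kRegisterSize;     // saved fp is a full register
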
 
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 97c56df..3c1d4d2 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -712,19 +712,6 @@
 }
 
 
-void ErrorObjectList::Add(JSObject* object) {
-  list_.Add(object);
-}
-
-
-void ErrorObjectList::Iterate(ObjectVisitor* v) {
-  if (!list_.is_empty()) {
-    Object** start = &list_[0];
-    v->VisitPointers(start, start + list_.length());
-  }
-}
-
-
 void Heap::ClearInstanceofCache() {
   set_instanceof_cache_function(the_hole_value());
 }
diff --git a/src/heap.cc b/src/heap.cc
index dff217a..4a7e8f2 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -583,8 +583,6 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
   isolate_->debug()->AfterGarbageCollection();
 #endif  // ENABLE_DEBUGGER_SUPPORT
-
-  error_object_list_.DeferredFormatStackTrace(isolate());
 }
 
 
@@ -1428,8 +1426,6 @@
   UpdateNewSpaceReferencesInExternalStringTable(
       &UpdateNewSpaceReferenceInExternalStringTableEntry);
 
-  error_object_list_.UpdateReferencesInNewSpace(this);
-
   promotion_queue_.Destroy();
 
   if (!FLAG_watch_ic_patching) {
@@ -5353,25 +5349,16 @@
   if (length < 0 || length > SeqOneByteString::kMaxLength) {
     return Failure::OutOfMemoryException(0xb);
   }
-
   int size = SeqOneByteString::SizeFor(length);
   ASSERT(size <= SeqOneByteString::kMaxSize);
-
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
   AllocationSpace retry_space = OLD_DATA_SPACE;
 
-  if (space == NEW_SPACE) {
-    if (size > kMaxObjectSizeInNewSpace) {
-      // Allocate in large object space, retry space will be ignored.
-      space = LO_SPACE;
-    } else if (size > Page::kMaxNonCodeHeapObjectSize) {
-      // Allocate in new space, retry in large object space.
-      retry_space = LO_SPACE;
-    }
-  } else if (space == OLD_DATA_SPACE &&
-             size > Page::kMaxNonCodeHeapObjectSize) {
+  if (size > Page::kMaxNonCodeHeapObjectSize) {
+    // Allocate in large object space; the retry space will be ignored.
     space = LO_SPACE;
   }
+
   Object* result;
   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -5397,18 +5384,11 @@
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
   AllocationSpace retry_space = OLD_DATA_SPACE;
 
-  if (space == NEW_SPACE) {
-    if (size > kMaxObjectSizeInNewSpace) {
-      // Allocate in large object space, retry space will be ignored.
-      space = LO_SPACE;
-    } else if (size > Page::kMaxNonCodeHeapObjectSize) {
-      // Allocate in new space, retry in large object space.
-      retry_space = LO_SPACE;
-    }
-  } else if (space == OLD_DATA_SPACE &&
-             size > Page::kMaxNonCodeHeapObjectSize) {
+  if (size > Page::kMaxNonCodeHeapObjectSize) {
+    // Allocate in large object space; the retry space will be ignored.
     space = LO_SPACE;
   }
+
   Object* result;
   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -5482,7 +5462,7 @@
   if (always_allocate()) return AllocateFixedArray(length, TENURED);
   // Allocate the raw data for a fixed array.
   int size = FixedArray::SizeFor(length);
-  return size <= kMaxObjectSizeInNewSpace
+  return size <= Page::kMaxNonCodeHeapObjectSize
       ? new_space_.AllocateRaw(size)
       : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
 }
@@ -5553,22 +5533,16 @@
   if (length < 0 || length > FixedArray::kMaxLength) {
     return Failure::OutOfMemoryException(0xe);
   }
-
+  int size = FixedArray::SizeFor(length);
   AllocationSpace space =
       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
-  int size = FixedArray::SizeFor(length);
-  if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
-    // Too big for new space.
-    space = LO_SPACE;
-  } else if (space == OLD_POINTER_SPACE &&
-             size > Page::kMaxNonCodeHeapObjectSize) {
-    // Too big for old pointer space.
+  AllocationSpace retry_space = OLD_POINTER_SPACE;
+
+  if (size > Page::kMaxNonCodeHeapObjectSize) {
+    // Allocate in large object space; the retry space will be ignored.
     space = LO_SPACE;
   }
 
-  AllocationSpace retry_space =
-      (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
-
   return AllocateRaw(size, space, retry_space);
 }
 
@@ -5686,27 +5660,19 @@
   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
     return Failure::OutOfMemoryException(0xf);
   }
-
-  AllocationSpace space =
-      (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
   int size = FixedDoubleArray::SizeFor(length);
+  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+  AllocationSpace retry_space = OLD_DATA_SPACE;
 
 #ifndef V8_HOST_ARCH_64_BIT
   size += kPointerSize;
 #endif
 
-  if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
-    // Too big for new space.
-    space = LO_SPACE;
-  } else if (space == OLD_DATA_SPACE &&
-             size > Page::kMaxNonCodeHeapObjectSize) {
-    // Too big for old data space.
+  if (size > Page::kMaxNonCodeHeapObjectSize) {
+    // Allocate in large object space; the retry space will be ignored.
     space = LO_SPACE;
   }
 
-  AllocationSpace retry_space =
-      (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
-
   HeapObject* object;
   { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
     if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
@@ -6575,7 +6541,6 @@
       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
     // Scavenge collections have special processing for this.
     external_string_table_.Iterate(v);
-    error_object_list_.Iterate(v);
   }
   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
 }
@@ -6975,8 +6940,6 @@
 
   external_string_table_.TearDown();
 
-  error_object_list_.TearDown();
-
   new_space_.TearDown();
 
   if (old_pointer_space_ != NULL) {
@@ -7929,120 +7892,6 @@
 }
 
 
-// Update all references.
-void ErrorObjectList::UpdateReferences() {
-  for (int i = 0; i < list_.length(); i++) {
-    HeapObject* object = HeapObject::cast(list_[i]);
-    MapWord first_word = object->map_word();
-    if (first_word.IsForwardingAddress()) {
-      list_[i] = first_word.ToForwardingAddress();
-    }
-  }
-}
-
-
-// Unforwarded objects in new space are dead and removed from the list.
-void ErrorObjectList::UpdateReferencesInNewSpace(Heap* heap) {
-  if (list_.is_empty()) return;
-  if (!nested_) {
-    int write_index = 0;
-    for (int i = 0; i < list_.length(); i++) {
-      MapWord first_word = HeapObject::cast(list_[i])->map_word();
-      if (first_word.IsForwardingAddress()) {
-        list_[write_index++] = first_word.ToForwardingAddress();
-      }
-    }
-    list_.Rewind(write_index);
-  } else {
-    // If a GC is triggered during DeferredFormatStackTrace, we do not move
-    // objects in the list, just remove dead ones, as to not confuse the
-    // loop in DeferredFormatStackTrace.
-    for (int i = 0; i < list_.length(); i++) {
-      MapWord first_word = HeapObject::cast(list_[i])->map_word();
-      list_[i] = first_word.IsForwardingAddress()
-                     ? first_word.ToForwardingAddress()
-                     : heap->the_hole_value();
-    }
-  }
-}
-
-
-void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
-  // If formatting the stack trace causes a GC, this method will be
-  // recursively called.  In that case, skip the recursive call, since
-  // the loop modifies the list while iterating over it.
-  if (nested_ || list_.is_empty() || isolate->has_pending_exception()) return;
-  nested_ = true;
-  HandleScope scope(isolate);
-  Handle<String> stack_key = isolate->factory()->stack_string();
-  int write_index = 0;
-  int budget = kBudgetPerGC;
-  for (int i = 0; i < list_.length(); i++) {
-    Object* object = list_[i];
-    JSFunction* getter_fun;
-
-    { DisallowHeapAllocation no_gc;
-      // Skip possible holes in the list.
-      if (object->IsTheHole()) continue;
-      if (isolate->heap()->InNewSpace(object) || budget == 0) {
-        list_[write_index++] = object;
-        continue;
-      }
-
-      // Check whether the stack property is backed by the original getter.
-      LookupResult lookup(isolate);
-      JSObject::cast(object)->LocalLookupRealNamedProperty(*stack_key, &lookup);
-      if (!lookup.IsFound() || lookup.type() != CALLBACKS) continue;
-      Object* callback = lookup.GetCallbackObject();
-      if (!callback->IsAccessorPair()) continue;
-      Object* getter_obj = AccessorPair::cast(callback)->getter();
-      if (!getter_obj->IsJSFunction()) continue;
-      getter_fun = JSFunction::cast(getter_obj);
-      String* key = isolate->heap()->hidden_stack_trace_string();
-      Object* value = getter_fun->GetHiddenProperty(key);
-      if (key != value) continue;
-    }
-
-    budget--;
-    HandleScope scope(isolate);
-    bool has_exception = false;
-#ifdef DEBUG
-    Handle<Map> map(HeapObject::cast(object)->map(), isolate);
-#endif
-    Handle<Object> object_handle(object, isolate);
-    Handle<Object> getter_handle(getter_fun, isolate);
-    Execution::Call(getter_handle, object_handle, 0, NULL, &has_exception);
-    ASSERT(*map == HeapObject::cast(*object_handle)->map());
-    if (has_exception) {
-      // Hit an exception (most likely a stack overflow).
-      // Wrap up this pass and retry after another GC.
-      isolate->clear_pending_exception();
-      // We use the handle since calling the getter might have caused a GC.
-      list_[write_index++] = *object_handle;
-      budget = 0;
-    }
-  }
-  list_.Rewind(write_index);
-  list_.Trim();
-  nested_ = false;
-}
-
-
-void ErrorObjectList::RemoveUnmarked(Heap* heap) {
-  for (int i = 0; i < list_.length(); i++) {
-    HeapObject* object = HeapObject::cast(list_[i]);
-    if (!Marking::MarkBitFrom(object).Get()) {
-      list_[i] = heap->the_hole_value();
-    }
-  }
-}
-
-
-void ErrorObjectList::TearDown() {
-  list_.Free();
-}
-
-
 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
   chunk->set_next_chunk(chunks_queued_for_free_);
   chunks_queued_for_free_ = chunk;
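
The allocator hunks above all converge on one rule, shown here extracted as a sketch (a hypothetical helper; the real logic stays inline in each allocator): anything larger than a page's regular-object limit goes straight to large-object space, and the old per-space thresholds around kMaxObjectSizeInNewSpace are gone.

    static AllocationSpace SelectSpace(int size, AllocationSpace preferred) {
      if (size > Page::kMaxNonCodeHeapObjectSize) {
        return LO_SPACE;  // AllocateRaw ignores the retry space here
      }
      return preferred;   // NEW_SPACE, or an old space when pretenured
    }
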
diff --git a/src/heap.h b/src/heap.h
index 6b02363..203ced5 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -475,41 +475,6 @@
 };
 
 
-// The stack property of an error object is implemented as a getter that
-// formats the attached raw stack trace into a string.  This raw stack trace
-// keeps code and function objects alive until the getter is called the first
-// time.  To release those objects, we call the getter after each GC for
-// newly tenured error objects that are kept in a list.
-class ErrorObjectList {
- public:
-  inline void Add(JSObject* object);
-
-  inline void Iterate(ObjectVisitor* v);
-
-  void TearDown();
-
-  void RemoveUnmarked(Heap* heap);
-
-  void DeferredFormatStackTrace(Isolate* isolate);
-
-  void UpdateReferences();
-
-  void UpdateReferencesInNewSpace(Heap* heap);
-
- private:
-  static const int kBudgetPerGC = 16;
-
-  ErrorObjectList() : nested_(false) { }
-
-  friend class Heap;
-
-  List<Object*> list_;
-  bool nested_;
-
-  DISALLOW_COPY_AND_ASSIGN(ErrorObjectList);
-};
-
-
 enum ArrayStorageAllocationMode {
   DONT_INITIALIZE_ARRAY_ELEMENTS,
   INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
@@ -1716,8 +1681,6 @@
   // we try to promote this object.
   inline bool ShouldBePromoted(Address old_address, int object_size);
 
-  int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; }
-
   void ClearJSFunctionResultCaches();
 
   void ClearNormalizedMapCaches();
@@ -1798,10 +1761,6 @@
     return &external_string_table_;
   }
 
-  ErrorObjectList* error_object_list() {
-    return &error_object_list_;
-  }
-
   // Returns the current sweep generation.
   int sweep_generation() {
     return sweep_generation_;
@@ -1966,12 +1925,6 @@
 
   int scan_on_scavenge_pages_;
 
-#if V8_TARGET_ARCH_X64
-  static const int kMaxObjectSizeInNewSpace = 1024*KB;
-#else
-  static const int kMaxObjectSizeInNewSpace = 512*KB;
-#endif
-
   NewSpace new_space_;
   OldSpace* old_pointer_space_;
   OldSpace* old_data_space_;
@@ -2406,8 +2359,6 @@
 
   ExternalStringTable external_string_table_;
 
-  ErrorObjectList error_object_list_;
-
   VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
 
   MemoryChunk* chunks_queued_for_free_;
diff --git a/src/hydrogen-deoptimizing-mark.cc b/src/hydrogen-deoptimizing-mark.cc
index 804d947..626848e 100644
--- a/src/hydrogen-deoptimizing-mark.cc
+++ b/src/hydrogen-deoptimizing-mark.cc
@@ -107,7 +107,7 @@
         instr->DeleteAndReplaceWith(last_dummy);
         continue;
       }
-      if (instr->IsSoftDeoptimize()) {
+      if (instr->IsDeoptimize()) {
         ASSERT(block->IsDeoptimizing());
         nullify = true;
       }
diff --git a/src/hydrogen-environment-liveness.cc b/src/hydrogen-environment-liveness.cc
index 20e680c..9efa47b 100644
--- a/src/hydrogen-environment-liveness.cc
+++ b/src/hydrogen-environment-liveness.cc
@@ -172,15 +172,6 @@
       last_simulate_ = NULL;
       break;
     }
-    case HValue::kDeoptimize: {
-      // Keep all environment slots alive.
-      HDeoptimize* deopt = HDeoptimize::cast(instr);
-      for (int i = deopt->first_local_index();
-           i < deopt->first_expression_index(); ++i) {
-        live->Add(i);
-      }
-      break;
-    }
     case HValue::kSimulate:
       last_simulate_ = HSimulate::cast(instr);
       went_live_since_last_simulate_.Clear();
diff --git a/src/hydrogen-gvn.cc b/src/hydrogen-gvn.cc
index 09bea5b..9a02a1d 100644
--- a/src/hydrogen-gvn.cc
+++ b/src/hydrogen-gvn.cc
@@ -401,7 +401,7 @@
     for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
       HInstruction* instr = it.Current();
       side_effects.Add(instr->ChangesFlags());
-      if (instr->IsSoftDeoptimize()) {
+      if (instr->IsDeoptimize()) {
         block_side_effects_[id].RemoveAll();
         side_effects.RemoveAll();
         break;
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 880de29..52612ac 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1680,7 +1680,7 @@
   for (int i = 1; i < map_set()->length(); ++i) {
     stream->Add(",%p", *map_set()->at(i));
   }
-  stream->Add("]");
+  stream->Add("]%s", CanOmitMapChecks() ? "(omitted)" : "");
 }
 
 
@@ -2136,16 +2136,6 @@
 }
 
 
-void HDeoptimize::PrintDataTo(StringStream* stream) {
-  if (OperandCount() == 0) return;
-  OperandAt(0)->PrintNameTo(stream);
-  for (int i = 1; i < OperandCount(); ++i) {
-    stream->Add(" ");
-    OperandAt(i)->PrintNameTo(stream);
-  }
-}
-
-
 void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target,
                                          Zone* zone) {
   ASSERT(return_target->IsInlineReturnTarget());
@@ -2310,20 +2300,38 @@
 }
 
 
-HConstant* HConstant::CopyToTruncatedInt32(Zone* zone) const {
+Maybe<HConstant*> HConstant::CopyToTruncatedInt32(Zone* zone) {
+  HConstant* res = NULL;
   if (has_int32_value_) {
-    return new(zone) HConstant(int32_value_,
-                               Representation::Integer32(),
-                               is_not_in_new_space_,
-                               handle_);
+    res = new(zone) HConstant(int32_value_,
+                              Representation::Integer32(),
+                              is_not_in_new_space_,
+                              handle_);
+  } else if (has_double_value_) {
+    res = new(zone) HConstant(DoubleToInt32(double_value_),
+                              Representation::Integer32(),
+                              is_not_in_new_space_,
+                              handle_);
+  } else {
+    ASSERT(!HasNumberValue());
+    Maybe<HConstant*> number = CopyToTruncatedNumber(zone);
+    if (number.has_value) return number.value->CopyToTruncatedInt32(zone);
   }
-  if (has_double_value_) {
-    return new(zone) HConstant(DoubleToInt32(double_value_),
-                               Representation::Integer32(),
-                               is_not_in_new_space_,
-                               handle_);
+  return Maybe<HConstant*>(res != NULL, res);
+}
+
+
+Maybe<HConstant*> HConstant::CopyToTruncatedNumber(Zone* zone) {
+  HConstant* res = NULL;
+  if (handle()->IsBoolean()) {
+    res = handle()->BooleanValue() ?
+      new(zone) HConstant(1) : new(zone) HConstant(0);
+  } else if (handle()->IsUndefined()) {
+    res = new(zone) HConstant(OS::nan_value());
+  } else if (handle()->IsNull()) {
+    res = new(zone) HConstant(0);
   }
-  return NULL;
+  return Maybe<HConstant*>(res != NULL, res);
 }
 
 
@@ -2757,6 +2765,55 @@
 }
 
 
+HCheckMaps* HCheckMaps::New(HValue* value,
+                            Handle<Map> map,
+                            Zone* zone,
+                            CompilationInfo* info,
+                            HValue* typecheck) {
+  HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
+  check_map->map_set_.Add(map, zone);
+  if (map->CanOmitMapChecks() &&
+      value->IsConstant() &&
+      HConstant::cast(value)->InstanceOf(map)) {
+    check_map->omit(info);
+  }
+  return check_map;
+}
+
+
+HCheckMaps* HCheckMaps::NewWithTransitions(HValue* value,
+                                           Handle<Map> map,
+                                           Zone* zone,
+                                           CompilationInfo* info) {
+  HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, value);
+  check_map->map_set_.Add(map, zone);
+
+  // Since transitioned elements maps of the initial map don't fail the map
+  // check, the CheckMaps instruction doesn't need to depend on ElementsKinds.
+  check_map->ClearGVNFlag(kDependsOnElementsKind);
+
+  ElementsKind kind = map->elements_kind();
+  bool packed = IsFastPackedElementsKind(kind);
+  while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
+    kind = GetNextMoreGeneralFastElementsKind(kind, packed);
+    Map* transitioned_map =
+        map->LookupElementsTransitionMap(kind);
+    if (transitioned_map) {
+      check_map->map_set_.Add(Handle<Map>(transitioned_map), zone);
+    }
+  }
+
+  if (map->CanOmitMapChecks() &&
+      value->IsConstant() &&
+      HConstant::cast(value)->InstanceOf(map)) {
+    check_map->omit(info);
+  }
+
+  check_map->map_set_.Sort();
+  return check_map;
+}
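
The loop walks the fast elements-kind lattice upward from the initial map's kind, so stores that legitimately generalize the elements kind still pass the check. A sketch of one walk, assuming the usual ordering in which packed smi elements generalize to double and then to object elements:

    // initial kind:  FAST_SMI_ELEMENTS
    //  iteration 1:  FAST_DOUBLE_ELEMENTS  -> transitioned map added, if the
    //  iteration 2:  FAST_ELEMENTS         ->   transition map exists
    // map_set_ now covers every map reachable by elements-kind
    // generalization, sorted so the emitted check compares in a fixed order.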
+
+
 void HCheckMaps::FinalizeUniqueValueId() {
   if (!map_unique_ids_.is_empty()) return;
   Zone* zone = block()->zone();
@@ -3187,11 +3244,6 @@
 }
 
 
-HType HAllocate::CalculateInferredType() {
-  return type_;
-}
-
-
 void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
                                           HValue* dominator) {
   ASSERT(side_effect == kChangesNewSpacePromotion);
@@ -3210,12 +3262,9 @@
   HValue* dominator_size = dominator_allocate_instr->size();
   HValue* current_size = size();
   // We can just fold allocations that are guaranteed in new space.
-  // TODO(hpayer): Support double aligned allocations.
   // TODO(hpayer): Add support for non-constant allocation in dominator.
-  if (!GuaranteedInNewSpace() || MustAllocateDoubleAligned() ||
-      !current_size->IsInteger32Constant() ||
+  if (!GuaranteedInNewSpace() || !current_size->IsInteger32Constant() ||
       !dominator_allocate_instr->GuaranteedInNewSpace() ||
-      dominator_allocate_instr->MustAllocateDoubleAligned() ||
       !dominator_size->IsInteger32Constant()) {
     if (FLAG_trace_allocation_folding) {
       PrintF("#%d (%s) cannot fold into #%d (%s)\n",
@@ -3229,43 +3278,37 @@
       HConstant::cast(dominator_size)->GetInteger32Constant();
   int32_t current_size_constant =
       HConstant::cast(current_size)->GetInteger32Constant();
+  int32_t new_dominator_size = dominator_size_constant + current_size_constant;
+
+  if (MustAllocateDoubleAligned()) {
+    if (!dominator_allocate_instr->MustAllocateDoubleAligned()) {
+      dominator_allocate_instr->SetFlags(HAllocate::ALLOCATE_DOUBLE_ALIGNED);
+    }
+    if ((dominator_size_constant & kDoubleAlignmentMask) != 0) {
+      dominator_size_constant += kDoubleSize / 2;
+      new_dominator_size += kDoubleSize / 2;
+    }
+  }
+
+  if (new_dominator_size > Page::kMaxNonCodeHeapObjectSize) {
+    if (FLAG_trace_allocation_folding) {
+      PrintF("#%d (%s) cannot fold into #%d (%s) due to size: %d\n",
+          id(), Mnemonic(), dominator->id(), dominator->Mnemonic(),
+          new_dominator_size);
+    }
+    return;
+  }
   HBasicBlock* block = dominator->block();
   Zone* zone = block->zone();
-  HInstruction* new_dominator_size = new(zone) HConstant(
-      dominator_size_constant + current_size_constant);
-  new_dominator_size->InsertBefore(dominator_allocate_instr);
-  dominator_allocate_instr->UpdateSize(new_dominator_size);
+  HInstruction* new_dominator_size_constant = new(zone) HConstant(
+      new_dominator_size);
+  new_dominator_size_constant->InsertBefore(dominator_allocate_instr);
+  dominator_allocate_instr->UpdateSize(new_dominator_size_constant);
 
 #ifdef VERIFY_HEAP
-  HInstruction* free_space_instr =
-      new(zone) HInnerAllocatedObject(dominator_allocate_instr,
-                                      dominator_size_constant,
-                                      type());
-  free_space_instr->InsertAfter(dominator_allocate_instr);
-  HConstant* filler_map = new(zone) HConstant(
-      isolate()->factory()->free_space_map(),
-      UniqueValueId(isolate()->heap()->free_space_map()),
-      Representation::Tagged(),
-      HType::Tagged(),
-      false,
-      true,
-      false,
-      false);
-  filler_map->InsertAfter(free_space_instr);
-
-  HInstruction* store_map = new(zone) HStoreNamedField(
-      free_space_instr, HObjectAccess::ForMap(), filler_map);
-  store_map->SetFlag(HValue::kHasNoObservableSideEffects);
-  store_map->InsertAfter(filler_map);
-
-  HInstruction* free_space_size = new(zone) HConstant(current_size_constant);
-  free_space_size->InsertAfter(store_map);
-  HObjectAccess access =
-      HObjectAccess::ForJSObjectOffset(FreeSpace::kSizeOffset);
-  HInstruction* store_size = new(zone) HStoreNamedField(
-      free_space_instr, access, free_space_size);
-  store_size->SetFlag(HValue::kHasNoObservableSideEffects);
-  store_size->InsertAfter(free_space_size);
+  if (FLAG_verify_heap) {
+    dominator_allocate_instr->SetFlags(HAllocate::PREFILL_WITH_FILLER);
+  }
 #endif
 
   // After that replace the dominated allocate instruction.
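
A worked example of the new alignment handling, assuming kDoubleSize == 8 (so kDoubleSize / 2 == 4) and kDoubleAlignmentMask == kDoubleSize - 1, as on ia32:

    // dominator size 12, current size 16  ->  new_dominator_size == 28
    // the fold needs double alignment and 12 & 7 != 0, so pad by 4 bytes:
    //   dominator_size_constant == 16, new_dominator_size == 32
    // The dominator allocation is updated to the single constant size 32,
    // and the dominated allocation is replaced by a reference into it.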
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 5fba5f2..ad27465 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -33,6 +33,7 @@
 #include "allocation.h"
 #include "code-stubs.h"
 #include "data-flow.h"
+#include "deoptimizer.h"
 #include "small-pointer-list.h"
 #include "string-stream.h"
 #include "v8conversions.h"
@@ -91,8 +92,9 @@
   V(CheckHeapObject)                           \
   V(CheckInstanceType)                         \
   V(CheckMaps)                                 \
-  V(CheckSmi)                                  \
+  V(CheckMapValue)                             \
   V(CheckPrototypeMaps)                        \
+  V(CheckSmi)                                  \
   V(ClampToUint8)                              \
   V(ClassOfTestAndBranch)                      \
   V(CompareNumericAndBranch)                   \
@@ -102,6 +104,7 @@
   V(CompareConstantEqAndBranch)                \
   V(Constant)                                  \
   V(Context)                                   \
+  V(DateField)                                 \
   V(DebugBreak)                                \
   V(DeclareGlobals)                            \
   V(Deoptimize)                                \
@@ -111,6 +114,8 @@
   V(EnterInlined)                              \
   V(EnvironmentMarker)                         \
   V(ForceRepresentation)                       \
+  V(ForInCacheArray)                           \
+  V(ForInPrepareMap)                           \
   V(FunctionLiteral)                           \
   V(GetCachedArrayIndex)                       \
   V(GlobalObject)                              \
@@ -134,6 +139,7 @@
   V(LinkObjectInList)                          \
   V(LoadContextSlot)                           \
   V(LoadExternalArrayPointer)                  \
+  V(LoadFieldByIndex)                          \
   V(LoadFunctionPrototype)                     \
   V(LoadGlobalCell)                            \
   V(LoadGlobalGeneric)                         \
@@ -162,7 +168,6 @@
   V(Shl)                                       \
   V(Shr)                                       \
   V(Simulate)                                  \
-  V(SoftDeoptimize)                            \
   V(StackCheck)                                \
   V(StoreContextSlot)                          \
   V(StoreGlobalCell)                           \
@@ -188,11 +193,6 @@
   V(UnknownOSRValue)                           \
   V(UseConst)                                  \
   V(ValueOf)                                   \
-  V(ForInPrepareMap)                           \
-  V(ForInCacheArray)                           \
-  V(CheckMapValue)                             \
-  V(LoadFieldByIndex)                          \
-  V(DateField)                                 \
   V(WrapReceiver)
 
 #define GVN_TRACKED_FLAG_LIST(V)               \
@@ -200,19 +200,20 @@
   V(NewSpacePromotion)
 
 #define GVN_UNTRACKED_FLAG_LIST(V)             \
-  V(Calls)                                     \
-  V(InobjectFields)                            \
+  V(ArrayElements)                             \
+  V(ArrayLengths)                              \
   V(BackingStoreFields)                        \
+  V(Calls)                                     \
+  V(ContextSlots)                              \
+  V(DoubleArrayElements)                       \
   V(DoubleFields)                              \
   V(ElementsKind)                              \
   V(ElementsPointer)                           \
-  V(ArrayElements)                             \
-  V(DoubleArrayElements)                       \
-  V(SpecializedArrayElements)                  \
   V(GlobalVars)                                \
-  V(ArrayLengths)                              \
-  V(ContextSlots)                              \
-  V(OsrEntries)
+  V(InobjectFields)                            \
+  V(OsrEntries)                                \
+  V(SpecializedArrayElements)
+
 
 #define DECLARE_ABSTRACT_INSTRUCTION(type)          \
   virtual bool Is##type() const { return true; }    \
@@ -407,6 +408,11 @@
     return ((type_ & kString) == kString);
   }
 
+  bool IsNonString() const {
+    return IsTaggedPrimitive() || IsSmi() || IsHeapNumber() ||
+        IsBoolean() || IsJSArray();
+  }
+
   bool IsBoolean() const {
     ASSERT(type_ != kUninitialized);
     return ((type_ & kBoolean) == kBoolean);
@@ -1493,16 +1499,20 @@
 };
 
 
-// We insert soft-deoptimize when we hit code with unknown typefeedback,
-// so that we get a chance of re-optimizing with useful typefeedback.
-// HSoftDeoptimize does not end a basic block as opposed to HDeoptimize.
-class HSoftDeoptimize: public HTemplateInstruction<0> {
+class HDeoptimize: public HTemplateInstruction<0> {
  public:
+  explicit HDeoptimize(Deoptimizer::BailoutType type) : type_(type) {}
+
   virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(SoftDeoptimize)
+  Deoptimizer::BailoutType type() { return type_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
+
+ private:
+  Deoptimizer::BailoutType type_;
 };
 
 
@@ -1517,59 +1527,6 @@
 };
 
 
-class HDeoptimize: public HControlInstruction {
- public:
-  HDeoptimize(int environment_length,
-              int first_local_index,
-              int first_expression_index,
-              Zone* zone)
-      : values_(environment_length, zone),
-        first_local_index_(first_local_index),
-        first_expression_index_(first_expression_index) { }
-
-  virtual Representation RequiredInputRepresentation(int index) {
-    return Representation::None();
-  }
-
-  virtual int OperandCount() { return values_.length(); }
-  virtual HValue* OperandAt(int index) const { return values_[index]; }
-  virtual void PrintDataTo(StringStream* stream);
-
-  virtual int SuccessorCount() { return 0; }
-  virtual HBasicBlock* SuccessorAt(int i) {
-    UNREACHABLE();
-    return NULL;
-  }
-  virtual void SetSuccessorAt(int i, HBasicBlock* block) {
-    UNREACHABLE();
-  }
-
-  void AddEnvironmentValue(HValue* value, Zone* zone) {
-    values_.Add(NULL, zone);
-    SetOperandAt(values_.length() - 1, value);
-  }
-  int first_local_index() { return first_local_index_; }
-  int first_expression_index() { return first_expression_index_; }
-
-  DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
-
-  enum UseEnvironment {
-    kNoUses,
-    kUseAll
-  };
-
- protected:
-  virtual void InternalSetOperandAt(int index, HValue* value) {
-    values_[index] = value;
-  }
-
- private:
-  ZoneList<HValue*> values_;
-  int first_local_index_;
-  int first_expression_index_;
-};
-
-
 class HGoto: public HTemplateControlInstruction<1, 0> {
  public:
   explicit HGoto(HBasicBlock* target) {
@@ -2732,12 +2689,7 @@
 class HCheckMaps: public HTemplateInstruction<2> {
  public:
   static HCheckMaps* New(HValue* value, Handle<Map> map, Zone* zone,
-                         HValue *typecheck = NULL) {
-    HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
-    check_map->map_set_.Add(map, zone);
-    return check_map;
-  }
-
+                         CompilationInfo* info, HValue *typecheck = NULL);
   static HCheckMaps* New(HValue* value, SmallMapList* maps, Zone* zone,
                          HValue *typecheck = NULL) {
     HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
@@ -2749,27 +2701,9 @@
   }
 
   static HCheckMaps* NewWithTransitions(HValue* value, Handle<Map> map,
-                                        Zone* zone) {
-    HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, value);
-    check_map->map_set_.Add(map, zone);
+                                        Zone* zone, CompilationInfo* info);
 
-    // Since transitioned elements maps of the initial map don't fail the map
-    // check, the CheckMaps instruction doesn't need to depend on ElementsKinds.
-    check_map->ClearGVNFlag(kDependsOnElementsKind);
-
-    ElementsKind kind = map->elements_kind();
-    bool packed = IsFastPackedElementsKind(kind);
-    while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
-      kind = GetNextMoreGeneralFastElementsKind(kind, packed);
-      Map* transitioned_map =
-          map->LookupElementsTransitionMap(kind);
-      if (transitioned_map) {
-        check_map->map_set_.Add(Handle<Map>(transitioned_map), zone);
-      }
-    };
-    check_map->map_set_.Sort();
-    return check_map;
-  }
+  bool CanOmitMapChecks() { return omit_; }
 
   virtual bool HasEscapingOperandAt(int index) { return false; }
   virtual Representation RequiredInputRepresentation(int index) {
@@ -2806,7 +2740,7 @@
  private:
   // Clients should use one of the static New* methods above.
   HCheckMaps(HValue* value, Zone *zone, HValue* typecheck)
-      : map_unique_ids_(0, zone) {
+      : omit_(false), map_unique_ids_(0, zone) {
     SetOperandAt(0, value);
     // Use the object value for the dependency if NULL is passed.
     // TODO(titzer): do GVN flags already express this dependency?
@@ -2818,6 +2752,16 @@
     SetGVNFlag(kDependsOnElementsKind);
   }
 
+  void omit(CompilationInfo* info) {
+    omit_ = true;
+    for (int i = 0; i < map_set_.length(); i++) {
+      Handle<Map> map = map_set_.at(i);
+      map->AddDependentCompilationInfo(DependentCode::kPrototypeCheckGroup,
+                                       info);
+    }
+  }
+
+  bool omit_;
   SmallMapList map_set_;
   ZoneList<UniqueValueId> map_unique_ids_;
 };
@@ -3296,6 +3240,11 @@
     return handle_;
   }
 
+  bool InstanceOf(Handle<Map> map) {
+    return handle_->IsJSObject() &&
+        Handle<JSObject>::cast(handle_)->map() == *map;
+  }
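+
+  // InstanceOf() is what lets the new HCheckMaps::New in
+  // hydrogen-instructions.cc elide checks on constants: a constant JSObject
+  // that already carries the expected map makes the check statically true,
+  // guarded by the compilation dependency that omit() registers:
+  //
+  //   if (map->CanOmitMapChecks() &&
+  //       value->IsConstant() &&
+  //       HConstant::cast(value)->InstanceOf(map)) {
+  //     check_map->omit(info);  // no check emitted; the dependency
+  //   }                         // invalidates the code if 'map' changes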
+
   bool IsSpecialDouble() const {
     return has_double_value_ &&
         (BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) ||
@@ -3350,7 +3299,8 @@
   virtual HType CalculateInferredType();
   bool IsInteger() { return handle()->IsSmi(); }
   HConstant* CopyToRepresentation(Representation r, Zone* zone) const;
-  HConstant* CopyToTruncatedInt32(Zone* zone) const;
+  Maybe<HConstant*> CopyToTruncatedInt32(Zone* zone);
+  Maybe<HConstant*> CopyToTruncatedNumber(Zone* zone);
   bool HasInteger32Value() const { return has_int32_value_; }
   int32_t Integer32Value() const {
     ASSERT(HasInteger32Value());
@@ -4964,14 +4914,15 @@
     CAN_ALLOCATE_IN_NEW_SPACE = 1 << 0,
     CAN_ALLOCATE_IN_OLD_DATA_SPACE = 1 << 1,
     CAN_ALLOCATE_IN_OLD_POINTER_SPACE = 1 << 2,
-    ALLOCATE_DOUBLE_ALIGNED = 1 << 3
+    ALLOCATE_DOUBLE_ALIGNED = 1 << 3,
+    PREFILL_WITH_FILLER = 1 << 4
   };
 
   HAllocate(HValue* context, HValue* size, HType type, Flags flags)
-      : type_(type),
-        flags_(flags) {
+      : flags_(flags) {
     SetOperandAt(0, context);
     SetOperandAt(1, size);
+    set_type(type);
     set_representation(Representation::Tagged());
     SetFlag(kTrackSideEffectDominators);
     SetGVNFlag(kChangesNewSpacePromotion);
@@ -4996,7 +4947,6 @@
 
   HValue* context() { return OperandAt(0); }
   HValue* size() { return OperandAt(1); }
-  HType type() { return type_; }
 
   virtual Representation RequiredInputRepresentation(int index) {
     if (index == 0) {
@@ -5014,8 +4964,6 @@
     known_initial_map_ = known_initial_map;
   }
 
-  virtual HType CalculateInferredType();
-
   bool CanAllocateInNewSpace() const {
     return (flags_ & CAN_ALLOCATE_IN_NEW_SPACE) != 0;
   }
@@ -5041,6 +4989,14 @@
     return (flags_ & ALLOCATE_DOUBLE_ALIGNED) != 0;
   }
 
+  bool MustPrefillWithFiller() const {
+    return (flags_ & PREFILL_WITH_FILLER) != 0;
+  }
+
+  void SetFlags(Flags flags) {
+    flags_ = static_cast<HAllocate::Flags>(flags_ | flags);
+  }
+
   void UpdateSize(HValue* size) {
     SetOperandAt(1, size);
   }
@@ -5053,7 +5009,6 @@
   DECLARE_CONCRETE_INSTRUCTION(Allocate)
 
  private:
-  HType type_;
   Flags flags_;
   Handle<Map> known_initial_map_;
 };
@@ -5062,10 +5017,10 @@
 class HInnerAllocatedObject: public HTemplateInstruction<1> {
  public:
   HInnerAllocatedObject(HValue* value, int offset, HType type = HType::Tagged())
-      : offset_(offset),
-        type_(type) {
+      : offset_(offset) {
     ASSERT(value->IsAllocate());
     SetOperandAt(0, value);
+    set_type(type);
     set_representation(Representation::Tagged());
   }
 
@@ -5076,15 +5031,12 @@
     return Representation::Tagged();
   }
 
-  virtual HType CalculateInferredType() { return type_; }
-
   virtual void PrintDataTo(StringStream* stream);
 
   DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject)
 
  private:
   int offset_;
-  HType type_;
 };
 
 
diff --git a/src/hydrogen-osr.cc b/src/hydrogen-osr.cc
index a2fa0bf..6c3d6ae 100644
--- a/src/hydrogen-osr.cc
+++ b/src/hydrogen-osr.cc
@@ -94,7 +94,7 @@
     }
   }
 
-  builder_->AddSimulate(osr_entry_id);
+  builder_->Add<HSimulate>(osr_entry_id);
   builder_->Add<HOsrEntry>(osr_entry_id);
   HContext* context = builder_->Add<HContext>();
   environment->BindContext(context);
diff --git a/src/hydrogen-representation-changes.cc b/src/hydrogen-representation-changes.cc
index e8f0140..45d35b2 100644
--- a/src/hydrogen-representation-changes.cc
+++ b/src/hydrogen-representation-changes.cc
@@ -51,9 +51,12 @@
   if (value->IsConstant()) {
     HConstant* constant = HConstant::cast(value);
     // Try to create a new copy of the constant with the new representation.
-    new_value = (is_truncating && to.IsInteger32())
-        ? constant->CopyToTruncatedInt32(graph()->zone())
-        : constant->CopyToRepresentation(to, graph()->zone());
+    if (is_truncating && to.IsInteger32()) {
+      Maybe<HConstant*> res = constant->CopyToTruncatedInt32(graph()->zone());
+      if (res.has_value) new_value = res.value;
+    } else {
+      new_value = constant->CopyToRepresentation(to, graph()->zone());
+    }
   }
 
   if (new_value == NULL) {
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 57220e0..610cb87 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -146,26 +146,6 @@
 }
 
 
-HDeoptimize* HBasicBlock::CreateDeoptimize(
-    HDeoptimize::UseEnvironment has_uses) {
-  ASSERT(HasEnvironment());
-  if (has_uses == HDeoptimize::kNoUses)
-    return new(zone()) HDeoptimize(0, 0, 0, zone());
-
-  HEnvironment* environment = last_environment();
-  int first_local_index = environment->first_local_index();
-  int first_expression_index = environment->first_expression_index();
-  HDeoptimize* instr = new(zone()) HDeoptimize(
-      environment->length(), first_local_index, first_expression_index, zone());
-  for (int i = 0; i < environment->length(); i++) {
-    HValue* val = environment->values()->at(i);
-    instr->AddEnvironmentValue(val, zone());
-  }
-
-  return instr;
-}
-
-
 HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
                                        RemovableSimulate removable) {
   ASSERT(HasEnvironment());
@@ -700,13 +680,16 @@
     : builder_(builder),
       position_(position),
       finished_(false),
+      deopt_then_(false),
+      deopt_else_(false),
       did_then_(false),
       did_else_(false),
       did_and_(false),
       did_or_(false),
       captured_(false),
       needs_compare_(true),
-      split_edge_merge_block_(NULL) {
+      split_edge_merge_block_(NULL),
+      merge_block_(NULL) {
   HEnvironment* env = builder->environment();
   first_true_block_ = builder->CreateBasicBlock(env->Copy());
   last_true_block_ = NULL;
@@ -720,6 +703,8 @@
     : builder_(builder),
       position_(RelocInfo::kNoPosition),
       finished_(false),
+      deopt_then_(false),
+      deopt_else_(false),
       did_then_(false),
       did_else_(false),
       did_and_(false),
@@ -836,14 +821,13 @@
 
 
 void HGraphBuilder::IfBuilder::Deopt() {
-  HBasicBlock* block = builder_->current_block();
-  block->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
-  builder_->set_current_block(NULL);
+  ASSERT(did_then_);
   if (did_else_) {
-    first_false_block_ = NULL;
+    deopt_else_ = true;
   } else {
-    first_true_block_ = NULL;
+    deopt_then_ = true;
   }
+  builder_->Add<HDeoptimize>(Deoptimizer::EAGER);
 }
 
 
@@ -868,20 +852,30 @@
       last_true_block_ = builder_->current_block();
     }
     if (first_true_block_ == NULL) {
-      // Deopt on true. Nothing to do, just continue the false block.
+      // Return on true. Nothing to do, just continue the false block.
     } else if (first_false_block_ == NULL) {
       // Deopt on false. Nothing to do except switching to the true block.
       builder_->set_current_block(last_true_block_);
     } else {
-      HEnvironment* merge_env = last_true_block_->last_environment()->Copy();
-      merge_block_ = builder_->CreateBasicBlock(merge_env);
+      merge_block_ = builder_->graph()->CreateBasicBlock();
       ASSERT(!finished_);
       if (!did_else_) Else();
       ASSERT(!last_true_block_->IsFinished());
       HBasicBlock* last_false_block = builder_->current_block();
       ASSERT(!last_false_block->IsFinished());
-      last_true_block_->GotoNoSimulate(merge_block_);
-      last_false_block->GotoNoSimulate(merge_block_);
+      if (deopt_then_) {
+        last_false_block->GotoNoSimulate(merge_block_);
+        builder_->PadEnvironmentForContinuation(last_true_block_,
+                                                merge_block_);
+        last_true_block_->GotoNoSimulate(merge_block_);
+      } else {
+        last_true_block_->GotoNoSimulate(merge_block_);
+        if (deopt_else_) {
+          builder_->PadEnvironmentForContinuation(last_false_block,
+                                                  merge_block_);
+        }
+        last_false_block->GotoNoSimulate(merge_block_);
+      }
       builder_->set_current_block(merge_block_);
     }
   }
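
Deopt() no longer finishes the current block; it only records which arm will deopt and emits the HDeoptimize instruction, leaving End() to route that arm through PadEnvironmentForContinuation before the join so both predecessors reach merge_block_ with compatible environments:

    // deopt_then_:  false arm joins first, then the padded true arm
    // deopt_else_:  true arm joins first, then the padded false arm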
@@ -991,36 +985,6 @@
 }
 
 
-void HGraphBuilder::AddSimulate(BailoutId id,
-                                RemovableSimulate removable) {
-  ASSERT(current_block() != NULL);
-  ASSERT(no_side_effects_scope_count_ == 0);
-  current_block()->AddSimulate(id, removable);
-}
-
-
-HReturn* HGraphBuilder::AddReturn(HValue* value) {
-  HValue* context = environment()->LookupContext();
-  int num_parameters = graph()->info()->num_parameters();
-  HValue* params = Add<HConstant>(num_parameters);
-  HReturn* return_instruction = new(graph()->zone())
-      HReturn(value, context, params);
-  current_block()->FinishExit(return_instruction);
-  return return_instruction;
-}
-
-
-void HGraphBuilder::AddSoftDeoptimize(SoftDeoptimizeMode mode) {
-  isolate()->counters()->soft_deopts_requested()->Increment();
-  if (FLAG_always_opt && mode == CAN_OMIT_SOFT_DEOPT) return;
-  if (current_block()->IsDeoptimizing()) return;
-  Add<HSoftDeoptimize>();
-  isolate()->counters()->soft_deopts_inserted()->Increment();
-  current_block()->MarkAsDeoptimizing();
-  graph()->set_has_soft_deoptimize(true);
-}
-
-
 HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
   HBasicBlock* b = graph()->CreateBasicBlock();
   b->SetInitialEnvironment(env);
@@ -1043,14 +1007,52 @@
 }
 
 
-HValue* HGraphBuilder::BuildCheckMap(HValue* obj,
-                                              Handle<Map> map) {
-  HCheckMaps* check = HCheckMaps::New(obj, map, zone());
+void HGraphBuilder::FinishExitWithHardDeoptimization(
+    HBasicBlock* continuation) {
+  PadEnvironmentForContinuation(current_block(), continuation);
+  Add<HDeoptimize>(Deoptimizer::EAGER);
+  if (no_side_effects_scope_count_ > 0) {
+    current_block()->GotoNoSimulate(continuation);
+  } else {
+    current_block()->Goto(continuation);
+  }
+}
+
+
+void HGraphBuilder::PadEnvironmentForContinuation(
+    HBasicBlock* from,
+    HBasicBlock* continuation) {
+  if (continuation->last_environment() != NULL) {
+    // When merging from a deopt block to a continuation, resolve differences
+    // in environment length by pushing undefined and popping extra values so
+    // that the environments match during the join.
+    int continuation_env_length = continuation->last_environment()->length();
+    while (continuation_env_length != from->last_environment()->length()) {
+      if (continuation_env_length > from->last_environment()->length()) {
+        from->last_environment()->Push(graph()->GetConstantUndefined());
+      } else {
+        from->last_environment()->Pop();
+      }
+    }
+  } else {
+    ASSERT(continuation->predecessors()->length() == 0);
+  }
+}
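
A sketch of the padding at work: if the continuation was first reached with an environment of length 5 and a deopting block arrives with length 3, two undefineds are pushed (with length 7, two values would be popped instead):

    // from:  [a, b, c]                        length 3
    // after: [a, b, c, undefined, undefined]  length 5, matches continuation
    //
    // The padded values are never read: the padded predecessor ends in a
    // deopt, so they exist only to satisfy the join's environment check.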
+
+
+HValue* HGraphBuilder::BuildCheckMap(HValue* obj, Handle<Map> map) {
+  HCheckMaps* check = HCheckMaps::New(obj, map, zone(), top_info());
   AddInstruction(check);
   return check;
 }
 
 
+HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* function) {
+  if (object->type().IsJSObject()) return object;
+  return Add<HWrapReceiver>(object, function);
+}
+
+
 HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
                                                  HValue* elements,
                                                  ElementsKind kind,
@@ -1208,7 +1210,7 @@
   if (is_store && (fast_elements || fast_smi_only_elements) &&
       store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
     HCheckMaps* check_cow_map = HCheckMaps::New(
-        elements, isolate()->factory()->fixed_array_map(), zone);
+        elements, isolate()->factory()->fixed_array_map(), zone, top_info());
     check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
     AddInstruction(check_cow_map);
   }
@@ -1276,7 +1278,8 @@
                                             length);
       } else {
         HCheckMaps* check_cow_map = HCheckMaps::New(
-            elements, isolate()->factory()->fixed_array_map(), zone);
+            elements, isolate()->factory()->fixed_array_map(),
+            zone, top_info());
         check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
         AddInstruction(check_cow_map);
       }
@@ -1719,7 +1722,7 @@
                       input, graph()->GetConstantMinus1());
         Representation rep = Representation::FromType(type);
         if (type->Is(Type::None())) {
-          AddSoftDeoptimize();
+          Add<HDeoptimize>(Deoptimizer::SOFT);
         }
         if (instr->IsBinaryOperation()) {
           HBinaryOperation* binop = HBinaryOperation::cast(instr);
@@ -1730,7 +1733,7 @@
       }
     case Token::BIT_NOT:
       if (type->Is(Type::None())) {
-        AddSoftDeoptimize();
+        Add<HDeoptimize>(Deoptimizer::SOFT);
       }
       return new(zone()) HBitNot(input);
   }
@@ -2638,7 +2641,7 @@
   ASSERT(!instr->IsControlInstruction());
   owner()->AddInstruction(instr);
   if (instr->HasObservableSideEffects()) {
-    owner()->AddSimulate(ast_id, REMOVABLE_SIMULATE);
+    owner()->Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
   }
 }
 
@@ -2680,7 +2683,7 @@
   owner()->AddInstruction(instr);
   owner()->Push(instr);
   if (instr->HasObservableSideEffects()) {
-    owner()->AddSimulate(ast_id, REMOVABLE_SIMULATE);
+    owner()->Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
   }
 }
 
@@ -2736,7 +2739,7 @@
   // this one isn't actually needed (and wouldn't work if it were targeted).
   if (instr->HasObservableSideEffects()) {
     builder->Push(instr);
-    builder->AddSimulate(ast_id, REMOVABLE_SIMULATE);
+    builder->Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
     builder->Pop();
   }
   BuildBranch(instr);
@@ -2924,7 +2927,7 @@
     VisitVariableDeclaration(scope->function());
   }
   VisitDeclarations(scope->declarations());
-  AddSimulate(BailoutId::Declarations());
+  Add<HSimulate>(BailoutId::Declarations());
 
   HValue* context = environment()->LookupContext();
   Add<HStackCheck>(context, HStackCheck::kFunctionEntry);
@@ -2933,7 +2936,7 @@
   if (HasStackOverflow()) return false;
 
   if (current_block() != NULL) {
-    AddReturn(graph()->GetConstantUndefined());
+    Add<HReturn>(graph()->GetConstantUndefined());
     set_current_block(NULL);
   }
 
@@ -3112,7 +3115,7 @@
 }
 
 
-void HOptimizedGraphBuilder::PushAndAdd(HInstruction* instr) {
+void HGraphBuilder::PushAndAdd(HInstruction* instr) {
   Push(instr);
   AddInstruction(instr);
 }
@@ -3223,10 +3226,10 @@
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
   if (stmt->condition()->ToBooleanIsTrue()) {
-    AddSimulate(stmt->ThenId());
+    Add<HSimulate>(stmt->ThenId());
     Visit(stmt->then_statement());
   } else if (stmt->condition()->ToBooleanIsFalse()) {
-    AddSimulate(stmt->ElseId());
+    Add<HSimulate>(stmt->ElseId());
     Visit(stmt->else_statement());
   } else {
     HBasicBlock* cond_true = graph()->CreateBasicBlock();
@@ -3333,7 +3336,7 @@
     // Not an inlined return, so an actual one.
     CHECK_ALIVE(VisitForValue(stmt->expression()));
     HValue* result = environment()->Pop();
-    AddReturn(result);
+    Add<HReturn>(result);
   } else if (state->inlining_kind() == CONSTRUCT_CALL_RETURN) {
     // Return from an inlined construct call. In a test context the return value
     // will always evaluate to true, in a value context the return value needs
@@ -3425,7 +3428,7 @@
   HValue* context = environment()->LookupContext();
 
   CHECK_ALIVE(VisitForValue(stmt->tag()));
-  AddSimulate(stmt->EntryId());
+  Add<HSimulate>(stmt->EntryId());
   HValue* tag_value = Pop();
   HBasicBlock* first_test_block = current_block();
 
@@ -3465,7 +3468,7 @@
 
     if (stmt->switch_type() == SwitchStatement::SMI_SWITCH) {
       if (!clause->compare_type()->Is(Type::Smi())) {
-        AddSoftDeoptimize();
+        Add<HDeoptimize>(Deoptimizer::SOFT);
       }
 
       HCompareNumericAndBranch* compare_ =
@@ -3515,7 +3518,7 @@
           normal_block = last_block;
           last_block = NULL;  // Cleared to indicate we've handled it.
         }
-      } else if (!curr_test_block->end()->IsDeoptimize()) {
+      } else {
         normal_block = curr_test_block->end()->FirstSuccessor();
         curr_test_block = curr_test_block->end()->SecondSuccessor();
       }
@@ -3569,7 +3572,7 @@
                                            HBasicBlock* loop_entry,
                                            BreakAndContinueInfo* break_info) {
   BreakAndContinueScope push(break_info, this);
-  AddSimulate(stmt->StackCheckId());
+  Add<HSimulate>(stmt->StackCheckId());
   HValue* context = environment()->LookupContext();
   HStackCheck* stack_check = Add<HStackCheck>(
       context, HStackCheck::kBackwardsBranch);
@@ -3730,7 +3733,7 @@
 
   HInstruction* map = Add<HForInPrepareMap>(
       environment()->LookupContext(), enumerable);
-  AddSimulate(stmt->PrepareId());
+  Add<HSimulate>(stmt->PrepareId());
 
   HInstruction* array = Add<HForInCacheArray>(
       enumerable, map, DescriptorArray::kEnumCacheBridgeCacheIndex);
@@ -4327,7 +4330,7 @@
             }
             AddInstruction(store);
             if (store->HasObservableSideEffects()) {
-              AddSimulate(key->id(), REMOVABLE_SIMULATE);
+              Add<HSimulate>(key->id(), REMOVABLE_SIMULATE);
             }
           } else {
             CHECK_ALIVE(VisitForEffect(value));
@@ -4450,7 +4453,7 @@
     // De-opt if elements kind changed from boilerplate_elements_kind.
     Handle<Map> map = Handle<Map>(original_boilerplate_object->map(),
                                   isolate());
-    AddInstruction(HCheckMaps::New(literal, map, zone()));
+    AddInstruction(HCheckMaps::New(literal, map, zone(), top_info()));
   }
 
   // The array is expected in the bailout environment during computation
@@ -4492,7 +4495,7 @@
         break;
     }
 
-    AddSimulate(expr->GetIdForElement(i));
+    Add<HSimulate>(expr->GetIdForElement(i));
   }
 
   Drop(1);  // array literal index
@@ -4541,14 +4544,15 @@
 
 void HOptimizedGraphBuilder::AddCheckMap(HValue* object, Handle<Map> map) {
   BuildCheckHeapObject(object);
-  AddInstruction(HCheckMaps::New(object, map, zone()));
+  AddInstruction(HCheckMaps::New(object, map, zone(), top_info()));
 }
 
 
 void HOptimizedGraphBuilder::AddCheckMapsWithTransitions(HValue* object,
                                                          Handle<Map> map) {
   BuildCheckHeapObject(object);
-  AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
+  AddInstruction(HCheckMaps::NewWithTransitions(
+      object, map, zone(), top_info()));
 }
 
 
@@ -4822,7 +4826,7 @@
   if (!ast_context()->IsEffect()) Push(result_value);
   store->set_position(position);
   AddInstruction(store);
-  AddSimulate(assignment_id);
+  Add<HSimulate>(assignment_id);
   if (!ast_context()->IsEffect()) Drop(1);
   ast_context()->ReturnValue(result_value);
   return true;
@@ -4881,7 +4885,7 @@
   // know about and do not want to handle ones we've never seen.  Otherwise
   // use a generic IC.
   if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
-    current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
+    FinishExitWithHardDeoptimization(join);
   } else {
     HInstruction* instr = BuildStoreNamedGeneric(object, name, store_value);
     instr->set_position(position);
@@ -4898,10 +4902,10 @@
       // unoptimized code).
       if (instr->HasObservableSideEffects()) {
         if (ast_context()->IsEffect()) {
-          AddSimulate(assignment_id, REMOVABLE_SIMULATE);
+          Add<HSimulate>(assignment_id, REMOVABLE_SIMULATE);
         } else {
           Push(result_value);
-          AddSimulate(assignment_id, REMOVABLE_SIMULATE);
+          Add<HSimulate>(assignment_id, REMOVABLE_SIMULATE);
           Drop(1);
         }
       }
@@ -4929,7 +4933,7 @@
     HValue* value = environment()->ExpressionStackAt(0);
     HValue* object = environment()->ExpressionStackAt(1);
 
-    if (expr->IsUninitialized()) AddSoftDeoptimize();
+    if (expr->IsUninitialized()) Add<HDeoptimize>(Deoptimizer::SOFT);
     return BuildStoreNamed(expr, expr->id(), expr->position(),
                            expr->AssignmentId(), prop, object, value, value);
   } else {
@@ -4946,7 +4950,7 @@
                              &has_side_effects);
     Drop(3);
     Push(value);
-    AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+    Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
     return ast_context()->ReturnValue(Pop());
   }
 }
@@ -4975,14 +4979,14 @@
       }
       builder.Then();
       builder.Else();
-      AddSoftDeoptimize(MUST_EMIT_SOFT_DEOPT);
+      Add<HDeoptimize>(Deoptimizer::EAGER);
       builder.End();
     }
     HInstruction* instr =
         Add<HStoreGlobalCell>(value, cell, lookup.GetPropertyDetails());
     instr->set_position(position);
     if (instr->HasObservableSideEffects()) {
-      AddSimulate(ast_id, REMOVABLE_SIMULATE);
+      Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
     }
   } else {
     HValue* context =  environment()->LookupContext();
@@ -4992,7 +4996,7 @@
                                  value, function_strict_mode_flag());
     instr->set_position(position);
     ASSERT(instr->HasObservableSideEffects());
-    AddSimulate(ast_id, REMOVABLE_SIMULATE);
+    Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
   }
 }
 
@@ -5054,7 +5058,7 @@
   instr->set_position(position);
   AddInstruction(instr);
   if (instr->HasObservableSideEffects()) {
-    AddSimulate(id, REMOVABLE_SIMULATE);
+    Add<HSimulate>(id, REMOVABLE_SIMULATE);
   }
   if (!ast_context()->IsEffect()) Drop(1);
   return ast_context()->ReturnValue(result_value);
@@ -5132,7 +5136,7 @@
         HStoreContextSlot* instr = Add<HStoreContextSlot>(context, var->index(),
                                                           mode, Top());
         if (instr->HasObservableSideEffects()) {
-          AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+          Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
         }
         break;
       }
@@ -5173,7 +5177,7 @@
       if (load == NULL) load = BuildLoadNamedGeneric(object, name, prop);
       PushAndAdd(load);
       if (load->HasObservableSideEffects()) {
-        AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
+        Add<HSimulate>(prop->LoadId(), REMOVABLE_SIMULATE);
       }
 
       CHECK_ALIVE(VisitForValue(expr->value()));
@@ -5183,7 +5187,7 @@
       HInstruction* instr = BuildBinaryOperation(operation, left, right);
       PushAndAdd(instr);
       if (instr->HasObservableSideEffects()) {
-        AddSimulate(operation->id(), REMOVABLE_SIMULATE);
+        Add<HSimulate>(operation->id(), REMOVABLE_SIMULATE);
       }
 
       return BuildStoreNamed(prop, expr->id(), expr->position(),
@@ -5201,7 +5205,7 @@
           false,  // is_store
           &has_side_effects);
       Push(load);
-      if (has_side_effects) AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
+      if (has_side_effects) Add<HSimulate>(prop->LoadId(), REMOVABLE_SIMULATE);
 
       CHECK_ALIVE(VisitForValue(expr->value()));
       HValue* right = Pop();
@@ -5210,7 +5214,7 @@
       HInstruction* instr = BuildBinaryOperation(operation, left, right);
       PushAndAdd(instr);
       if (instr->HasObservableSideEffects()) {
-        AddSimulate(operation->id(), REMOVABLE_SIMULATE);
+        Add<HSimulate>(operation->id(), REMOVABLE_SIMULATE);
       }
 
       HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
@@ -5222,7 +5226,7 @@
       Drop(3);
       Push(instr);
       ASSERT(has_side_effects);  // Stores always have side effects.
-      AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+      Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
       return ast_context()->ReturnValue(Pop());
     }
 
@@ -5344,7 +5348,7 @@
         HStoreContextSlot* instr = Add<HStoreContextSlot>(context, var->index(),
                                                           mode, Top());
         if (instr->HasObservableSideEffects()) {
-          AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+          Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
         }
         return ast_context()->ReturnValue(Pop());
       }
@@ -5378,7 +5382,7 @@
   HValue* value = environment()->Pop();
   HThrow* instr = Add<HThrow>(context, value);
   instr->set_position(expr->position());
-  AddSimulate(expr->id());
+  Add<HSimulate>(expr->id());
   current_block()->FinishExit(new(zone()) HAbnormalExit);
   set_current_block(NULL);
 }
@@ -5410,7 +5414,7 @@
     Handle<String> name,
     Property* expr) {
   if (expr->IsUninitialized()) {
-    AddSoftDeoptimize();
+    Add<HDeoptimize>(Deoptimizer::SOFT);
   }
   HValue* context = environment()->LookupContext();
   return new(zone()) HLoadNamedGeneric(context, object, name);
@@ -5506,7 +5510,8 @@
     Handle<Map> map,
     bool is_store,
     KeyedAccessStoreMode store_mode) {
-  HCheckMaps* mapcheck = HCheckMaps::New(object, map, zone(), dependency);
+  HCheckMaps* mapcheck = HCheckMaps::New(
+      object, map, zone(), top_info(), dependency);
   AddInstruction(mapcheck);
   if (dependency) {
     mapcheck->ClearGVNFlag(kDependsOnElementsKind);
@@ -5690,7 +5695,7 @@
       if (is_store && !IsFastDoubleElementsKind(elements_kind)) {
         AddInstruction(HCheckMaps::New(
             elements, isolate()->factory()->fixed_array_map(),
-            zone(), mapcompare));
+            zone(), top_info(), mapcompare));
       }
       if (map->IsJSArray()) {
         HInstruction* length = AddLoad(object, HObjectAccess::ForArrayLength(),
@@ -5732,7 +5737,8 @@
   }
 
   // Deopt if none of the cases matched.
-  current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
+  NoObservableSideEffectsScope scope(this);
+  FinishExitWithHardDeoptimization(join);
   set_current_block(join);
   return is_store ? NULL : Pop();
 }
@@ -5768,12 +5774,12 @@
   } else {
     if (is_store) {
       if (expr->IsAssignment() && expr->AsAssignment()->IsUninitialized()) {
-        AddSoftDeoptimize();
+        Add<HDeoptimize>(Deoptimizer::SOFT);
       }
       instr = BuildStoreKeyedGeneric(obj, key, val);
     } else {
       if (expr->AsProperty()->IsUninitialized()) {
-        AddSoftDeoptimize();
+        Add<HDeoptimize>(Deoptimizer::SOFT);
       }
       instr = BuildLoadKeyedGeneric(obj, key);
     }
@@ -5952,10 +5958,10 @@
         &has_side_effects);
     if (has_side_effects) {
       if (ast_context()->IsEffect()) {
-        AddSimulate(expr->id(), REMOVABLE_SIMULATE);
+        Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
       } else {
         Push(load);
-        AddSimulate(expr->id(), REMOVABLE_SIMULATE);
+        Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
         Drop(1);
       }
     }
@@ -6057,7 +6063,7 @@
     PreProcessCall(call);
     AddInstruction(call);
     if (!ast_context()->IsEffect()) Push(call);
-    AddSimulate(expr->id(), REMOVABLE_SIMULATE);
+    Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
     if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
   }
 
@@ -6189,7 +6195,11 @@
   // know about and do not want to handle ones we've never seen.  Otherwise
   // use a generic IC.
   if (ordered_functions == types->length() && FLAG_deoptimize_uncommon_cases) {
-    current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
+    // Because the deopt may be the only path in the polymorphic call, make
+    // sure that the environment stack at the deopt matches the depth it
+    // would otherwise have had after a successful call.
+    Drop(argument_count - (ast_context()->IsEffect() ? 0 : 1));
+    FinishExitWithHardDeoptimization(join);
   } else {
     HValue* context = environment()->LookupContext();
     HCallNamed* call = new(zone()) HCallNamed(context, name, argument_count);
@@ -6447,7 +6457,7 @@
   inner_env->BindContext(context);
 #endif
 
-  AddSimulate(return_id);
+  Add<HSimulate>(return_id);
   current_block()->UpdateEnvironment(inner_env);
   HArgumentsObject* arguments_object = NULL;
 
@@ -6881,7 +6891,7 @@
   if (function_state()->outer() == NULL) {
     HInstruction* elements = Add<HArgumentsElements>(false);
     HInstruction* length = Add<HArgumentsLength>(elements);
-    HValue* wrapped_receiver = Add<HWrapReceiver>(receiver, function);
+    HValue* wrapped_receiver = BuildWrapReceiver(receiver, function);
     HInstruction* result =
         new(zone()) HApplyArguments(function,
                                     wrapped_receiver,
@@ -6898,7 +6908,7 @@
     HArgumentsObject* args = function_state()->entry()->arguments_object();
     const ZoneList<HValue*>* arguments_values = args->arguments_values();
     int arguments_count = arguments_values->length();
-    PushAndAdd(new(zone()) HWrapReceiver(receiver, function));
+    Push(BuildWrapReceiver(receiver, function));
     for (int i = 1; i < arguments_count; i++) {
       Push(arguments_values->at(i));
     }
@@ -7460,8 +7470,8 @@
 
 void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForValue(expr->expression()));
-  HValue* value = Pop();
   Handle<Type> operand_type = expr->expression()->bounds().lower;
+  HValue* value = TruncateToNumber(Pop(), &operand_type);
   HInstruction* instr = BuildUnaryMathOp(value, operand_type, Token::SUB);
   return ast_context()->ReturnInstruction(instr, expr->id());
 }
@@ -7469,8 +7479,8 @@
 
 void HOptimizedGraphBuilder::VisitBitNot(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForValue(expr->expression()));
-  HValue* value = Pop();
   Handle<Type> operand_type = expr->expression()->bounds().lower;
+  HValue* value = TruncateToNumber(Pop(), &operand_type);
   HInstruction* instr = BuildUnaryMathOp(value, operand_type, Token::BIT_NOT);
   return ast_context()->ReturnInstruction(instr, expr->id());
 }
@@ -7625,7 +7635,7 @@
         HStoreContextSlot* instr = Add<HStoreContextSlot>(context, var->index(),
                                                           mode, after);
         if (instr->HasObservableSideEffects()) {
-          AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+          Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
         }
         break;
       }
@@ -7668,7 +7678,7 @@
       if (load == NULL) load = BuildLoadNamedGeneric(object, name, prop);
       PushAndAdd(load);
       if (load->HasObservableSideEffects()) {
-        AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
+        Add<HSimulate>(prop->LoadId(), REMOVABLE_SIMULATE);
       }
 
       after = BuildIncrement(returns_original_input, expr);
@@ -7691,7 +7701,7 @@
           false,  // is_store
           &has_side_effects);
       Push(load);
-      if (has_side_effects) AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
+      if (has_side_effects) Add<HSimulate>(prop->LoadId(), REMOVABLE_SIMULATE);
 
       after = BuildIncrement(returns_original_input, expr);
       input = environment()->ExpressionStackAt(0);
@@ -7708,7 +7718,7 @@
       environment()->SetExpressionStackAt(0, after);
       if (returns_original_input) environment()->SetExpressionStackAt(1, input);
       ASSERT(has_side_effects);  // Stores always have side effects.
-      AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+      Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
     }
   }
 
@@ -7800,6 +7810,40 @@
 }
 
 
+HValue* HGraphBuilder::TruncateToNumber(HValue* value, Handle<Type>* expected) {
+  if (value->IsConstant()) {
+    HConstant* constant = HConstant::cast(value);
+    Maybe<HConstant*> number = constant->CopyToTruncatedNumber(zone());
+    if (number.has_value) {
+      *expected = handle(Type::Number(), isolate());
+      return AddInstruction(number.value);
+    }
+    return value;
+  }
+
+  Handle<Type> expected_type = *expected;
+  Representation rep = Representation::FromType(expected_type);
+  if (!rep.IsTagged()) return value;
+
+  // If our type feedback suggests that we can non-observably truncate to
+  // number, we introduce the appropriate check here. This avoids 'value'
+  // having a tagged representation later on.
+  if (expected_type->Is(Type::Oddball())) {
+    // TODO(olivf) The BinaryOpStub only records undefined. It might pay off to
+    // also record booleans and convert them to 0/1 here.
+    IfBuilder if_nan(this);
+    if_nan.If<HCompareObjectEqAndBranch>(value,
+        graph()->GetConstantUndefined());
+    if_nan.Then();
+    if_nan.ElseDeopt();
+    if_nan.End();
+    return Add<HConstant>(OS::nan_value(), Representation::Double());
+  }
+
+  return value;
+}
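
The effect on an expression like 'undefined - 1' when the recorded feedback is Oddball only: the operand is compared against undefined (anything else deopts) and then replaced outright by a NaN double constant, so the subtraction runs in double representation instead of on a tagged value:

    // value == undefined  ->  operand becomes HConstant(NaN, Double)
    // otherwise           ->  ElseDeopt()
    // NaN - 1 evaluates to NaN, matching ToNumber(undefined) semantics.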
+
+
 HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
     BinaryOperation* expr,
     HValue* left,
@@ -7813,13 +7857,21 @@
   Representation right_rep = Representation::FromType(right_type);
   Representation result_rep = Representation::FromType(result_type);
 
+  if (expr->op() != Token::ADD ||
+      (left->type().IsNonString() && right->type().IsNonString())) {
+    // For addition we can only truncate the arguments to number if we can
+    // prove that we will not end up in string concatenation mode.
+    left = TruncateToNumber(left, &left_type);
+    right = TruncateToNumber(right, &right_type);
+  }
+
   if (left_type->Is(Type::None())) {
-    AddSoftDeoptimize();
+    Add<HDeoptimize>(Deoptimizer::SOFT);
     // TODO(rossberg): we should be able to get rid of non-continuous defaults.
     left_type = handle(Type::Any(), isolate());
   }
   if (right_type->Is(Type::None())) {
-    AddSoftDeoptimize();
+    Add<HDeoptimize>(Deoptimizer::SOFT);
     right_type = handle(Type::Any(), isolate());
   }
   HInstruction* instr = NULL;
@@ -8169,7 +8221,7 @@
   // Cases handled below depend on collected type feedback. They should
   // soft deoptimize when there is no type feedback.
   if (combined_type->Is(Type::None())) {
-    AddSoftDeoptimize();
+    Add<HDeoptimize>(Deoptimizer::SOFT);
     combined_type = left_type = right_type = handle(Type::Any(), isolate());
   }
 
@@ -8672,7 +8724,7 @@
         HStoreContextSlot* store = Add<HStoreContextSlot>(
             context, variable->index(), HStoreContextSlot::kNoCheck, value);
         if (store->HasObservableSideEffects()) {
-          AddSimulate(proxy->id(), REMOVABLE_SIMULATE);
+          Add<HSimulate>(proxy->id(), REMOVABLE_SIMULATE);
         }
       }
       break;
@@ -8710,7 +8762,7 @@
       HStoreContextSlot* store = Add<HStoreContextSlot>(
           context, variable->index(), HStoreContextSlot::kNoCheck, value);
       if (store->HasObservableSideEffects()) {
-        AddSimulate(proxy->id(), REMOVABLE_SIMULATE);
+        Add<HSimulate>(proxy->id(), REMOVABLE_SIMULATE);
       }
       break;
     }
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 797b444..3d03933 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -137,17 +137,15 @@
   }
 
   int PredecessorIndexOf(HBasicBlock* predecessor) const;
-  void AddSimulate(BailoutId ast_id,
-                   RemovableSimulate removable = FIXED_SIMULATE) {
-    AddInstruction(CreateSimulate(ast_id, removable));
+  HSimulate* AddSimulate(BailoutId ast_id,
+                         RemovableSimulate removable = FIXED_SIMULATE) {
+    HSimulate* instr = CreateSimulate(ast_id, removable);
+    AddInstruction(instr);
+    return instr;
   }
   void AssignCommonDominator(HBasicBlock* other);
   void AssignLoopSuccessorDominators();
 
-  void FinishExitWithDeoptimization(HDeoptimize::UseEnvironment has_uses) {
-    FinishExit(CreateDeoptimize(has_uses));
-  }
-
   // Add the inlined function exit sequence, adding an HLeaveInlined
   // instruction and updating the bailout environment.
   void AddLeaveInlined(HValue* return_value, FunctionState* state);
@@ -182,11 +180,12 @@
 #endif
 
  private:
+  friend class HGraphBuilder;
+
   void RegisterPredecessor(HBasicBlock* pred);
   void AddDominatedBlock(HBasicBlock* block);
 
   HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
-  HDeoptimize* CreateDeoptimize(HDeoptimize::UseEnvironment has_uses);
 
   int block_id_;
   HGraph* graph_;
@@ -1023,11 +1022,6 @@
             new(zone()) I(p1, p2, p3, p4, p5, p6, p7, p8)));
   }
 
-  void AddSimulate(BailoutId id,
-                   RemovableSimulate removable = FIXED_SIMULATE);
-
-  HReturn* AddReturn(HValue* value);
-
   void IncrementInNoSideEffectsScope() {
     no_side_effects_scope_count_++;
   }
@@ -1044,6 +1038,7 @@
 
   HValue* BuildCheckHeapObject(HValue* object);
   HValue* BuildCheckMap(HValue* obj, Handle<Map> map);
+  HValue* BuildWrapReceiver(HValue* object, HValue* function);
 
   // Building common constructs
   HValue* BuildCheckForCapacityGrow(HValue* object,
@@ -1118,12 +1113,11 @@
 
   HValue* AddLoadJSBuiltin(Builtins::JavaScript builtin, HValue* context);
 
-  enum SoftDeoptimizeMode {
-    MUST_EMIT_SOFT_DEOPT,
-    CAN_OMIT_SOFT_DEOPT
-  };
+  HValue* TruncateToNumber(HValue* value, Handle<Type>* expected);
 
-  void AddSoftDeoptimize(SoftDeoptimizeMode mode = CAN_OMIT_SOFT_DEOPT);
+  void PushAndAdd(HInstruction* instr);
+
+  void FinishExitWithHardDeoptimization(HBasicBlock* continuation);
 
   class IfBuilder {
    public:
@@ -1228,7 +1222,6 @@
     void ElseDeopt() {
       Else();
       Deopt();
-      End();
     }
 
     void Return(HValue* value);
@@ -1241,6 +1234,8 @@
     HGraphBuilder* builder_;
     int position_;
     bool finished_ : 1;
+    bool deopt_then_ : 1;
+    bool deopt_else_ : 1;
     bool did_then_ : 1;
     bool did_else_ : 1;
     bool did_and_ : 1;
@@ -1422,12 +1417,68 @@
 
  private:
   HGraphBuilder();
+
+  void PadEnvironmentForContinuation(HBasicBlock* from,
+                                     HBasicBlock* continuation);
+
   CompilationInfo* info_;
   HGraph* graph_;
   HBasicBlock* current_block_;
   int no_side_effects_scope_count_;
 };
 
+
+template<>
+inline HDeoptimize* HGraphBuilder::Add(Deoptimizer::BailoutType type) {
+  if (type == Deoptimizer::SOFT) {
+    isolate()->counters()->soft_deopts_requested()->Increment();
+    if (FLAG_always_opt) return NULL;
+  }
+  if (current_block()->IsDeoptimizing()) return NULL;
+  HDeoptimize* instr = new(zone()) HDeoptimize(type);
+  AddInstruction(instr);
+  if (type == Deoptimizer::SOFT) {
+    isolate()->counters()->soft_deopts_inserted()->Increment();
+    graph()->set_has_soft_deoptimize(true);
+  }
+  current_block()->MarkAsDeoptimizing();
+  return instr;
+}
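
Call sites ignore the return value, so returning NULL when the soft deopt is omitted (FLAG_always_opt) or the block is already deoptimizing is harmless; the usual pattern is the one-liner used throughout this patch:

    if (expr->IsUninitialized()) Add<HDeoptimize>(Deoptimizer::SOFT);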
+
+
+template<>
+inline HSimulate* HGraphBuilder::Add(BailoutId id,
+                                     RemovableSimulate removable) {
+  HSimulate* instr = current_block()->CreateSimulate(id, removable);
+  AddInstruction(instr);
+  return instr;
+}
+
+
+template<>
+inline HSimulate* HGraphBuilder::Add(BailoutId id) {
+  return Add<HSimulate>(id, FIXED_SIMULATE);
+}
+
+
+template<>
+inline HReturn* HGraphBuilder::Add(HValue* value) {
+  HValue* context = environment()->LookupContext();
+  int num_parameters = graph()->info()->num_parameters();
+  HValue* params = Add<HConstant>(num_parameters);
+  HReturn* return_instruction = new(graph()->zone())
+      HReturn(value, context, params);
+  current_block()->FinishExit(return_instruction);
+  return return_instruction;
+}
+
+
+template<>
+inline HReturn* HGraphBuilder::Add(HConstant* p1) {
+  return Add<HReturn>(static_cast<HValue*>(p1));
+}
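
The HConstant* forwarding specialization exists because template argument deduction is exact: a call such as

    Add<HReturn>(graph()->GetConstantUndefined());  // deduces P1 == HConstant*

would otherwise instantiate the generic one-argument Add and try to construct an HReturn from a single value; casting to HValue* routes it to the specialization above. (That GetConstantUndefined() yields an HConstant* is inferred from this overload's existence, not spelled out in the patch.)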
+
+
 class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
  public:
   // A class encapsulating (lazily-allocated) break and continue blocks for
@@ -1666,8 +1717,6 @@
   // Visit a list of expressions from left to right, each in a value context.
   void VisitExpressions(ZoneList<Expression*>* exprs);
 
-  void PushAndAdd(HInstruction* instr);
-
   // Remove the arguments from the bailout environment and emit instructions
   // to push them as outgoing parameters.
   template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 548cbaa..5789f49 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -43,6 +43,16 @@
 namespace internal {
 
 
+void ToNumberStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { eax };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ = NULL;
+}
+
+
 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
@@ -300,27 +310,6 @@
 }
 
 
-void ToNumberStub::Generate(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in eax.
-  Label check_heap_number, call_builtin;
-  __ JumpIfNotSmi(eax, &check_heap_number, Label::kNear);
-  __ ret(0);
-
-  __ bind(&check_heap_number);
-  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-  Factory* factory = masm->isolate()->factory();
-  __ cmp(ebx, Immediate(factory->heap_number_map()));
-  __ j(not_equal, &call_builtin, Label::kNear);
-  __ ret(0);
-
-  __ bind(&call_builtin);
-  __ pop(ecx);  // Pop return address.
-  __ push(eax);
-  __ push(ecx);  // Push return address.
-  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
-}
-
-
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // Create a new closure from the given function info in new
   // space. Set the context to the current context in esi.
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 505cd4f..fcacaf5 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -741,6 +741,17 @@
   __ bind(&done);
 }
 
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+  SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+  SetFrameSlot(offset, value);
+}
+
+
 #undef __
 
 
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 66a7c1c..8f11acc 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -4045,7 +4045,7 @@
   __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
   __ and_(scratch, Immediate(
       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
-  __ cmp(scratch, ASCII_STRING_TYPE);
+  __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
   __ j(not_equal, &bailout);
 
   // Add (separator length times array_length) - separator length
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 2c234d8..54ae715 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -1003,12 +1003,6 @@
 }
 
 
-void LCodeGen::SoftDeoptimize(LEnvironment* environment) {
-  ASSERT(!info()->IsStub());
-  DeoptimizeIf(no_condition, environment, Deoptimizer::SOFT);
-}
-
-
 void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
   ZoneList<Handle<Map> > maps(1, zone());
   int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
@@ -5802,6 +5796,7 @@
 
 
 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+  if (instr->hydrogen()->CanOmitMapChecks()) return;
   LOperand* input = instr->value();
   ASSERT(input->IsRegister());
   Register reg = ToRegister(input);
@@ -5992,6 +5987,7 @@
 
 
 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+  if (instr->hydrogen()->CanOmitPrototypeChecks()) return;
   Register reg = ToRegister(instr->temp());
 
   ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
@@ -5999,11 +5995,9 @@
 
   ASSERT(prototypes->length() == maps->length());
 
-  if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
-    for (int i = 0; i < prototypes->length(); i++) {
-      __ LoadHeapObject(reg, prototypes->at(i));
-      DoCheckMapCommon(reg, maps->at(i), instr);
-    }
+  for (int i = 0; i < prototypes->length(); i++) {
+    __ LoadHeapObject(reg, prototypes->at(i));
+    DoCheckMapCommon(reg, maps->at(i), instr);
   }
 }
 
@@ -6046,6 +6040,23 @@
   }
 
   __ bind(deferred->exit());
+
+  if (instr->hydrogen()->MustPrefillWithFiller()) {
+    if (instr->size()->IsConstantOperand()) {
+      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+      __ mov(temp, (size / kPointerSize) - 1);
+    } else {
+      temp = ToRegister(instr->size());
+      __ shr(temp, kPointerSizeLog2);
+      __ dec(temp);
+    }
+    Label loop;
+    __ bind(&loop);
+    __ mov(FieldOperand(result, temp, times_pointer_size, 0),
+        isolate()->factory()->one_pointer_filler_map());
+    __ dec(temp);
+    __ j(not_zero, &loop);
+  }
 }
 
 
@@ -6306,11 +6317,15 @@
 
 
 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
-  if (instr->hydrogen_value()->IsSoftDeoptimize()) {
-    SoftDeoptimize(instr->environment());
-  } else {
-    DeoptimizeIf(no_condition, instr->environment());
+  Deoptimizer::BailoutType type = instr->hydrogen()->type();
+  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+  // needed return address), even though the implementation of LAZY and EAGER is
+  // now identical. When LAZY is eventually completely folded into EAGER, remove
+  // the special case below.
+  if (info()->IsStub() && type == Deoptimizer::EAGER) {
+    type = Deoptimizer::LAZY;
   }
+  DeoptimizeIf(no_condition, instr->environment(), type);
 }
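
Three related changes land in this file: DoCheckMaps and DoCheckPrototypeMaps return immediately when Hydrogen proved the check omittable, DoAllocate can prefill a fresh object with one-pointer filler maps when MustPrefillWithFiller() is set, and the separate SoftDeoptimize path is folded into DoDeoptimize using the bailout type carried by the hydrogen instruction. A plain C++ stand-in for what the emitted ia32 filler loop computes:

    // Indices N-1 down to 1 receive the filler map; index 0 is left for
    // the object's real map, so the heap stays walkable even if it is
    // scanned before the fields are initialized.
    #include <stdint.h>
    #include <vector>

    const int kPointerSize = (int) sizeof(intptr_t);

    void PrefillWithFiller(intptr_t* result, int size_in_bytes, intptr_t filler_map) {
      for (int temp = size_in_bytes / kPointerSize - 1; temp != 0; temp--) {
        result[temp] = filler_map;  // mov [result + temp * kPointerSize], filler
      }
    }

    int main() {
      std::vector<intptr_t> object(4, 0);
      PrefillWithFiller(&object[0], 4 * kPointerSize, 0x2A);
      return object[0] == 0 && object[3] == 0x2A ? 0 : 1;
    }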
 
 
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index eb75225..30b889f 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -283,7 +283,6 @@
                     LEnvironment* environment,
                     Deoptimizer::BailoutType bailout_type);
   void DeoptimizeIf(Condition cc, LEnvironment* environment);
-  void SoftDeoptimize(LEnvironment* environment);
 
   void AddToTranslation(Translation* translation,
                         LOperand* op,
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index aebe26b..e903519 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -754,11 +754,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
-  return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
 LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
   return AssignEnvironment(new(zone()) LDeoptimize);
 }
@@ -2063,8 +2058,10 @@
 
 
 LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
-  LUnallocated* temp = TempRegister();
+  LUnallocated* temp = NULL;
+  if (!instr->CanOmitPrototypeChecks()) temp = TempRegister();
   LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
+  if (instr->CanOmitPrototypeChecks()) return result;
   return AssignEnvironment(result);
 }
 
@@ -2081,8 +2078,10 @@
 
 
 LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* value = NULL;
+  if (!instr->CanOmitMapChecks()) value = UseRegisterAtStart(instr->value());
   LCheckMaps* result = new(zone()) LCheckMaps(value);
+  if (instr->CanOmitMapChecks()) return result;
   return AssignEnvironment(result);
 }
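
The chunk builder mirrors the codegen change: when the check is omittable it skips the register use (and temp register) and, more importantly, skips AssignEnvironment, so the instruction carries no deoptimization data. A pattern sketch with stand-in types, not the real builder API; the real code allocates in a zone rather than with new:

    struct HCheckMaps { bool can_omit; };
    struct LOperand {};
    struct LCheckMaps {
      explicit LCheckMaps(LOperand* v) : value(v) {}
      LOperand* value;  // null when the check can never fail
    };

    LCheckMaps* DoCheckMaps(HCheckMaps* instr) {
      LOperand* value = 0;
      if (!instr->can_omit) value = new LOperand();  // register use only if needed
      LCheckMaps* result = new LCheckMaps(value);
      if (instr->can_omit) return result;  // no environment: no deopt possible
      // ... AssignEnvironment(result) would attach deopt data here ...
      return result;
    }

    int main() {
      HCheckMaps omittable = { true };
      return DoCheckMaps(&omittable)->value == 0 ? 0 : 1;
    }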
 
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index a938ee5..9dd97c1 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -66,6 +66,7 @@
   V(CheckFunction)                              \
   V(CheckInstanceType)                          \
   V(CheckMaps)                                  \
+  V(CheckMapValue)                              \
   V(CheckNonSmi)                                \
   V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
@@ -84,14 +85,18 @@
   V(ConstantS)                                  \
   V(ConstantT)                                  \
   V(Context)                                    \
+  V(DateField)                                  \
   V(DebugBreak)                                 \
   V(DeclareGlobals)                             \
   V(Deoptimize)                                 \
   V(DivI)                                       \
   V(DoubleToI)                                  \
   V(DoubleToSmi)                                \
+  V(Drop)                                       \
   V(DummyUse)                                   \
   V(ElementsKind)                               \
+  V(ForInCacheArray)                            \
+  V(ForInPrepareMap)                            \
   V(FunctionLiteral)                            \
   V(GetCachedArrayIndex)                        \
   V(GlobalObject)                               \
@@ -99,13 +104,13 @@
   V(Goto)                                       \
   V(HasCachedArrayIndexAndBranch)               \
   V(HasInstanceTypeAndBranch)                   \
+  V(InnerAllocatedObject)                       \
   V(InstanceOf)                                 \
   V(InstanceOfKnownGlobal)                      \
   V(InstanceSize)                               \
   V(InstructionGap)                             \
   V(Integer32ToDouble)                          \
   V(Integer32ToSmi)                             \
-  V(Uint32ToDouble)                             \
   V(InvokeFunction)                             \
   V(IsConstructCallAndBranch)                   \
   V(IsObjectAndBranch)                          \
@@ -118,6 +123,7 @@
   V(LinkObjectInList)                           \
   V(LoadContextSlot)                            \
   V(LoadExternalArrayPointer)                   \
+  V(LoadFieldByIndex)                           \
   V(LoadFunctionPrototype)                      \
   V(LoadGlobalCell)                             \
   V(LoadGlobalGeneric)                          \
@@ -180,16 +186,10 @@
   V(TrapAllocationMemento)                      \
   V(Typeof)                                     \
   V(TypeofIsAndBranch)                          \
+  V(Uint32ToDouble)                             \
   V(UnknownOSRValue)                            \
   V(ValueOf)                                    \
-  V(ForInPrepareMap)                            \
-  V(ForInCacheArray)                            \
-  V(CheckMapValue)                              \
-  V(LoadFieldByIndex)                           \
-  V(DateField)                                  \
-  V(WrapReceiver)                               \
-  V(Drop)                                       \
-  V(InnerAllocatedObject)
+  V(WrapReceiver)
 
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
@@ -424,6 +424,7 @@
 class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+  DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
 };
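
Besides re-sorting the concrete-instruction X-macro list alphabetically (folding in the group that used to trail at the end), this hunk gives LDeoptimize a hydrogen accessor so codegen can read the bailout type. A small demo of the X-macro technique the list relies on:

    // One list, several expansions (opcodes, names, visitors, ...).
    #include <stdio.h>

    #define INSTRUCTION_LIST(V) \
      V(CheckMaps)              \
      V(Deoptimize)             \
      V(WrapReceiver)

    enum Opcode {
    #define DECLARE_OPCODE(type) k##type,
      INSTRUCTION_LIST(DECLARE_OPCODE)
    #undef DECLARE_OPCODE
      kNumberOfOpcodes
    };

    int main() {
    #define PRINT_NAME(type) printf("%s\n", #type);
      INSTRUCTION_LIST(PRINT_NAME)
    #undef PRINT_NAME
      return kNumberOfOpcodes == 3 ? 0 : 1;
    }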
 
 
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index ef90c10..2ab5a25 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1248,6 +1248,7 @@
                               Label* gc_required,
                               AllocationFlags flags) {
   ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+  ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -2798,7 +2799,8 @@
   // Check that both are flat ASCII strings.
   const int kFlatAsciiStringMask =
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+  const int kFlatAsciiStringTag =
+      kStringTag | kOneByteStringTag | kSeqStringTag;
   // Interleave bits from both instance types and compare them in one check.
   ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
   and_(scratch1, kFlatAsciiStringMask);
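
Allocate() now asserts that a statically-sized inline allocation fits under Page::kMaxNonCodeHeapObjectSize; larger objects have to take the runtime path into large-object space. A sketch of the invariant; the sizes are illustrative, not V8's real limits:

    #include <assert.h>
    #include <stddef.h>

    const size_t kPageSize = 1u << 20;
    const size_t kMaxNonCodeHeapObjectSize = kPageSize / 2;

    void* AllocateInline(size_t object_size) {
      // Callers with a larger statically-known size must use the runtime
      // path into large-object space instead of this bump-pointer path.
      assert(object_size <= kMaxNonCodeHeapObjectSize);
      return NULL;  // bump-pointer allocation elided
    }

    int main() {
      AllocateInline(64);
      return 0;
    }
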
diff --git a/src/isolate.cc b/src/isolate.cc
index 4adcd69..4cf0252 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -95,7 +95,7 @@
   simulator_ = NULL;
 #endif
   js_entry_sp_ = NULL;
-  external_callback_ = NULL;
+  external_callback_scope_ = NULL;
   current_vm_state_ = EXTERNAL;
   try_catch_handler_address_ = NULL;
   context_ = NULL;
diff --git a/src/isolate.h b/src/isolate.h
index 6e5d5c6..2612242 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -65,6 +65,7 @@
 class DeoptimizerData;
 class Deserializer;
 class EmptyStatement;
+class ExternalCallbackScope;
 class ExternalReferenceTable;
 class Factory;
 class FunctionInfoListener;
@@ -279,7 +280,8 @@
 #endif  // USE_SIMULATOR
 
   Address js_entry_sp_;  // the stack pointer of the bottom JS entry frame
-  Address external_callback_;  // the external callback we're currently in
+  // the external callback we're currently in
+  ExternalCallbackScope* external_callback_scope_;
   StateTag current_vm_state_;
 
   // Generated code scratch locations.
@@ -1032,11 +1034,11 @@
 
   static const int kJSRegexpStaticOffsetsVectorSize = 128;
 
-  Address external_callback() {
-    return thread_local_top_.external_callback_;
+  ExternalCallbackScope* external_callback_scope() {
+    return thread_local_top_.external_callback_scope_;
   }
-  void set_external_callback(Address callback) {
-    thread_local_top_.external_callback_ = callback;
+  void set_external_callback_scope(ExternalCallbackScope* scope) {
+    thread_local_top_.external_callback_scope_ = scope;
   }
 
   StateTag current_vm_state() {
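
The isolate stops tracking a bare callback Address and tracks an ExternalCallbackScope* instead. The class body is not part of this hunk, so the following RAII sketch is a hypothetical reconstruction of the likely shape: each scope saves the previous one and installs itself, giving the profiler a walkable stack of nested external callbacks.

    struct ExternalCallbackScope;

    struct Isolate {
      ExternalCallbackScope* external_callback_scope;
      Isolate() : external_callback_scope(0) {}
    };

    struct ExternalCallbackScope {
      ExternalCallbackScope(Isolate* isolate, void* callback)
          : isolate_(isolate),
            callback_(callback),
            previous_(isolate->external_callback_scope) {
        isolate->external_callback_scope = this;  // push
      }
      ~ExternalCallbackScope() {
        isolate_->external_callback_scope = previous_;  // pop
      }
      Isolate* isolate_;
      void* callback_;
      ExternalCallbackScope* previous_;  // link for walking nested callbacks
    };

    int main() {
      Isolate isolate;
      ExternalCallbackScope outer(&isolate, 0);
      { ExternalCallbackScope inner(&isolate, 0); }  // nests, then unwinds
      return isolate.external_callback_scope == &outer ? 0 : 1;
    }
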
diff --git a/src/lithium.cc b/src/lithium.cc
index 3df8d6c..e9c3531 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -270,7 +270,7 @@
     return -(index + 3) * kPointerSize;
   } else {
     // Incoming parameter. Skip the return address.
-    return -(index - 1) * kPointerSize;
+    return -(index + 1) * kPointerSize + kFPOnStackSize + kPCOnStackSize;
   }
 }
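
The incoming-parameter offset is rewritten in terms of explicit saved-fp and return-address slot sizes. When both slots are one pointer wide, the new expression is algebraically identical to the old one: -(index + 1) * k + 2k = -(index - 1) * k. A quick check:

    #include <assert.h>

    int main() {
      const int k = 4;  // kPointerSize == kFPOnStackSize == kPCOnStackSize
      for (int index = -16; index < 0; index++) {
        int old_offset = -(index - 1) * k;
        int new_offset = -(index + 1) * k + k /* fp */ + k /* pc */;
        assert(old_offset == new_offset);
      }
      return 0;
    }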
 
diff --git a/src/log.cc b/src/log.cc
index d26279b..520723e 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -54,6 +54,14 @@
 #undef DECLARE_EVENT
 
 
+#define PROFILER_LOG(Call)                                \
+  do {                                                    \
+    CpuProfiler* cpu_profiler = isolate_->cpu_profiler(); \
+    if (cpu_profiler->is_profiling()) {                   \
+      cpu_profiler->Call;                                 \
+    }                                                     \
+  } while (false);
+
 // ComputeMarker must only be used when SharedFunctionInfo is known.
 static const char* ComputeMarker(Code* code) {
   switch (code->kind()) {
@@ -543,7 +551,7 @@
  public:
   explicit JitLogger(JitCodeEventHandler code_event_handler);
 
-  void CodeMovedEvent(Address from, Address to);
+  void CodeMoveEvent(Address from, Address to);
   void CodeDeleteEvent(Address from);
   void AddCodeLinePosInfoEvent(
       void* jit_handler_data,
@@ -588,7 +596,7 @@
 }
 
 
-void JitLogger::CodeMovedEvent(Address from, Address to) {
+void JitLogger::CodeMoveEvent(Address from, Address to) {
   Code* from_code = Code::cast(HeapObject::FromAddress(from));
 
   JitCodeEvent event;
@@ -1209,7 +1217,7 @@
 
 void Logger::CallbackEventInternal(const char* prefix, Name* name,
                                    Address entry_point) {
-  if (!log_->IsEnabled() || !FLAG_log_code) return;
+  if (!FLAG_log_code || !log_->IsEnabled()) return;
   Log::MessageBuilder msg(log_);
   msg.Append("%s,%s,-2,",
              kLogEventsNames[CODE_CREATION_EVENT],
@@ -1235,19 +1243,19 @@
 
 
 void Logger::CallbackEvent(Name* name, Address entry_point) {
-  if (!log_->IsEnabled() || !FLAG_log_code) return;
+  PROFILER_LOG(CallbackEvent(name, entry_point));
   CallbackEventInternal("", name, entry_point);
 }
 
 
 void Logger::GetterCallbackEvent(Name* name, Address entry_point) {
-  if (!log_->IsEnabled() || !FLAG_log_code) return;
+  PROFILER_LOG(GetterCallbackEvent(name, entry_point));
   CallbackEventInternal("get ", name, entry_point);
 }
 
 
 void Logger::SetterCallbackEvent(Name* name, Address entry_point) {
-  if (!log_->IsEnabled() || !FLAG_log_code) return;
+  PROFILER_LOG(SetterCallbackEvent(name, entry_point));
   CallbackEventInternal("set ", name, entry_point);
 }
 
@@ -1268,8 +1276,9 @@
 void Logger::CodeCreateEvent(LogEventsAndTags tag,
                              Code* code,
                              const char* comment) {
-  if (!is_logging_code_events()) return;
+  PROFILER_LOG(CodeCreateEvent(tag, code, comment));
 
+  if (!is_logging_code_events()) return;
   JIT_LOG(CodeCreateEvent(tag, code, comment));
   LL_LOG(CodeCreateEvent(tag, code, comment));
   CODE_ADDRESS_MAP_LOG(CodeCreateEvent(tag, code, comment));
@@ -1286,8 +1295,9 @@
 void Logger::CodeCreateEvent(LogEventsAndTags tag,
                              Code* code,
                              Name* name) {
-  if (!is_logging_code_events()) return;
+  PROFILER_LOG(CodeCreateEvent(tag, code, name));
 
+  if (!is_logging_code_events()) return;
   JIT_LOG(CodeCreateEvent(tag, code, name));
   LL_LOG(CodeCreateEvent(tag, code, name));
   CODE_ADDRESS_MAP_LOG(CodeCreateEvent(tag, code, name));
@@ -1312,8 +1322,9 @@
                              SharedFunctionInfo* shared,
                              CompilationInfo* info,
                              Name* name) {
-  if (!is_logging_code_events()) return;
+  PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, name));
 
+  if (!is_logging_code_events()) return;
   JIT_LOG(CodeCreateEvent(tag, code, shared, info, name));
   LL_LOG(CodeCreateEvent(tag, code, shared, info, name));
   CODE_ADDRESS_MAP_LOG(CodeCreateEvent(tag, code, shared, info, name));
@@ -1348,8 +1359,9 @@
                              SharedFunctionInfo* shared,
                              CompilationInfo* info,
                              Name* source, int line) {
-  if (!is_logging_code_events()) return;
+  PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, source, line));
 
+  if (!is_logging_code_events()) return;
   JIT_LOG(CodeCreateEvent(tag, code, shared, info, source, line));
   LL_LOG(CodeCreateEvent(tag, code, shared, info, source, line));
   CODE_ADDRESS_MAP_LOG(CodeCreateEvent(tag, code, shared, info, source, line));
@@ -1378,8 +1390,9 @@
 void Logger::CodeCreateEvent(LogEventsAndTags tag,
                              Code* code,
                              int args_count) {
-  if (!is_logging_code_events()) return;
+  PROFILER_LOG(CodeCreateEvent(tag, code, args_count));
 
+  if (!is_logging_code_events()) return;
   JIT_LOG(CodeCreateEvent(tag, code, args_count));
   LL_LOG(CodeCreateEvent(tag, code, args_count));
   CODE_ADDRESS_MAP_LOG(CodeCreateEvent(tag, code, args_count));
@@ -1394,6 +1407,9 @@
 
 
 void Logger::CodeMovingGCEvent() {
+  PROFILER_LOG(CodeMovingGCEvent());
+
+  if (!is_logging_code_events()) return;
   if (!log_->IsEnabled() || !FLAG_ll_prof) return;
   LL_LOG(CodeMovingGCEvent());
   OS::SignalCodeMovingGC();
@@ -1401,8 +1417,9 @@
 
 
 void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
-  if (!is_logging_code_events()) return;
+  PROFILER_LOG(RegExpCodeCreateEvent(code, source));
 
+  if (!is_logging_code_events()) return;
   JIT_LOG(RegExpCodeCreateEvent(code, source));
   LL_LOG(RegExpCodeCreateEvent(code, source));
   CODE_ADDRESS_MAP_LOG(RegExpCodeCreateEvent(code, source));
@@ -1419,8 +1436,10 @@
 
 
 void Logger::CodeMoveEvent(Address from, Address to) {
-  JIT_LOG(CodeMovedEvent(from, to));
-  if (!log_->IsEnabled()) return;
+  PROFILER_LOG(CodeMoveEvent(from, to));
+
+  if (!is_logging_code_events()) return;
+  JIT_LOG(CodeMoveEvent(from, to));
   LL_LOG(CodeMoveEvent(from, to));
   CODE_ADDRESS_MAP_LOG(CodeMoveEvent(from, to));
   MoveEventInternal(CODE_MOVE_EVENT, from, to);
@@ -1428,12 +1447,14 @@
 
 
 void Logger::CodeDeleteEvent(Address from) {
+  PROFILER_LOG(CodeDeleteEvent(from));
+
+  if (!is_logging_code_events()) return;
   JIT_LOG(CodeDeleteEvent(from));
-  if (!log_->IsEnabled()) return;
   LL_LOG(CodeDeleteEvent(from));
   CODE_ADDRESS_MAP_LOG(CodeDeleteEvent(from));
 
-  if (!log_->IsEnabled() || !FLAG_log_code) return;
+  if (!FLAG_log_code || !log_->IsEnabled()) return;
   Log::MessageBuilder msg(log_);
   msg.Append("%s,", kLogEventsNames[CODE_DELETE_EVENT]);
   msg.AppendAddress(from);
@@ -1498,6 +1519,9 @@
 
 
 void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) {
+  PROFILER_LOG(SharedFunctionInfoMoveEvent(from, to));
+
+  if (!is_logging_code_events()) return;
   MoveEventInternal(SHARED_FUNC_MOVE_EVENT, from, to);
 }
 
@@ -1505,7 +1529,7 @@
 void Logger::MoveEventInternal(LogEventsAndTags event,
                                Address from,
                                Address to) {
-  if (!log_->IsEnabled() || !FLAG_log_code) return;
+  if (!FLAG_log_code || !log_->IsEnabled()) return;
   Log::MessageBuilder msg(log_);
   msg.Append("%s,", kLogEventsNames[event]);
   msg.AppendAddress(from);
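
The new PROFILER_LOG macro hands every code event to the CPU profiler first, gated only on is_profiling(), before the logger's own is_logging_code_events() and FLAG_log_code gates apply; JitLogger::CodeMovedEvent is also renamed to CodeMoveEvent to match the Logger method it backs. A standalone sketch of the dispatch pattern with stand-in types:

    #include <stdio.h>

    struct CpuProfiler {
      bool is_profiling() const { return profiling_; }
      void CodeCreateEvent(const char* name) { printf("profile %s\n", name); }
      bool profiling_;
    };

    CpuProfiler g_profiler = { true };

    #define PROFILER_LOG(Call)                    \
      do {                                        \
        CpuProfiler* cpu_profiler = &g_profiler;  \
        if (cpu_profiler->is_profiling()) {       \
          cpu_profiler->Call;                     \
        }                                         \
      } while (false)

    void CodeCreateEvent(const char* name, bool is_logging_code_events) {
      PROFILER_LOG(CodeCreateEvent(name));  // profiler first, unconditionally
      if (!is_logging_code_events) return;  // logger gates apply only below
      printf("log %s\n", name);
    }

    int main() {
      CodeCreateEvent("stub", false);  // still profiled, not logged
      return 0;
    }
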
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 95f673c..815cae5 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -2396,7 +2396,6 @@
   string_table->ElementsRemoved(v.PointersRemoved());
   heap()->external_string_table_.Iterate(&v);
   heap()->external_string_table_.CleanUp();
-  heap()->error_object_list_.RemoveUnmarked(heap());
 
   // Process the weak references.
   MarkCompactWeakObjectRetainer mark_compact_object_retainer;
@@ -3463,9 +3462,6 @@
   heap_->UpdateReferencesInExternalStringTable(
       &UpdateReferenceInExternalStringTableEntry);
 
-  // Update pointers in the new error object list.
-  heap_->error_object_list()->UpdateReferences();
-
   if (!FLAG_watch_ic_patching) {
     // Update JSFunction pointers from the runtime profiler.
     heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index f984b3a..0e1b224 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -39,6 +39,16 @@
 namespace internal {
 
 
+void ToNumberStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { a0 };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ = NULL;
+}
+
+
 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
@@ -287,16 +297,6 @@
                                            Register rhs);
 
 
-// Check if the operand is a heap number.
-static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
-                                   Register scratch1, Register scratch2,
-                                   Label* not_a_heap_number) {
-  __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
-  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
-  __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
-}
-
-
 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
   // Update the static counter each time a new code stub is generated.
   Isolate* isolate = masm->isolate();
@@ -321,24 +321,6 @@
 }
 
 
-void ToNumberStub::Generate(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in a0.
-  Label check_heap_number, call_builtin;
-  __ JumpIfNotSmi(a0, &check_heap_number);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-
-  __ bind(&check_heap_number);
-  EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-
-  __ bind(&call_builtin);
-  __ push(a0);
-  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
-}
-
-
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // Create a new closure from the given function info in new
   // space. Set the context to the current context in cp.
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 840462e..402f0f5 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -648,6 +648,17 @@
       count() * table_entry_size_);
 }
 
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+  SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+  SetFrameSlot(offset, value);
+}
+
+
 #undef __
 
 
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 29633dd..4ff896d 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -790,14 +790,6 @@
 }
 
 
-void LCodeGen::SoftDeoptimize(LEnvironment* environment,
-                              Register src1,
-                              const Operand& src2) {
-  ASSERT(!info()->IsStub());
-  DeoptimizeIf(al, environment, Deoptimizer::SOFT, src1, src2);
-}
-
-
 void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
   ZoneList<Handle<Map> > maps(1, zone());
   int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
@@ -5195,6 +5187,7 @@
 
 
 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+  if (instr->hydrogen()->CanOmitMapChecks()) return;
   Register map_reg = scratch0();
   LOperand* input = instr->value();
   ASSERT(input->IsRegister());
@@ -5263,6 +5256,8 @@
 
 
 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+  if (instr->hydrogen()->CanOmitPrototypeChecks()) return;
+
   Register prototype_reg = ToRegister(instr->temp());
   Register map_reg = ToRegister(instr->temp2());
 
@@ -5271,12 +5266,10 @@
 
   ASSERT(prototypes->length() == maps->length());
 
-  if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
-    for (int i = 0; i < prototypes->length(); i++) {
-      __ LoadHeapObject(prototype_reg, prototypes->at(i));
-      __ lw(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
-      DoCheckMapCommon(map_reg, maps->at(i), instr->environment());
-    }
+  for (int i = 0; i < prototypes->length(); i++) {
+    __ LoadHeapObject(prototype_reg, prototypes->at(i));
+    __ lw(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
+    DoCheckMapCommon(map_reg, maps->at(i), instr->environment());
   }
 }
 
@@ -5324,6 +5317,25 @@
   }
 
   __ bind(deferred->exit());
+
+  if (instr->hydrogen()->MustPrefillWithFiller()) {
+    if (instr->size()->IsConstantOperand()) {
+      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+      __ li(scratch, Operand(size));
+    } else {
+      scratch = ToRegister(instr->size());
+    }
+    __ Subu(scratch, scratch, Operand(kPointerSize));
+    __ Subu(result, result, Operand(kHeapObjectTag));
+    Label loop;
+    __ bind(&loop);
+    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+    __ Addu(at, result, Operand(scratch));
+    __ sw(scratch2, MemOperand(at));
+    __ Subu(scratch, scratch, Operand(kPointerSize));
+    __ Branch(&loop, ge, scratch, Operand(zero_reg));
+    __ Addu(result, result, Operand(kHeapObjectTag));
+  }
 }
 
 
@@ -5630,11 +5642,15 @@
 
 
 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
-  if (instr->hydrogen_value()->IsSoftDeoptimize()) {
-    SoftDeoptimize(instr->environment(), zero_reg, Operand(zero_reg));
-  } else {
-    DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+  Deoptimizer::BailoutType type = instr->hydrogen()->type();
+  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+  // needed return address), even though the implementation of LAZY and EAGER is
+  // now identical. When LAZY is eventually completely folded into EAGER, remove
+  // the special case below.
+  if (info()->IsStub() && type == Deoptimizer::EAGER) {
+    type = Deoptimizer::LAZY;
   }
+  DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
 }
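
The MIPS prefill loop works differently from the ia32 one: it untags the result pointer, walks a byte offset downward in kPointerSize steps, and keeps looping while the offset is still >= 0, so offset 0 is written here as well. A plain C++ stand-in for the arithmetic:

    #include <stdint.h>

    const int kPointerSize = (int) sizeof(intptr_t);
    const int kHeapObjectTag = 1;

    void PrefillWithFillerMips(uint8_t* tagged, int size, intptr_t filler) {
      uint8_t* result = tagged - kHeapObjectTag;  // Subu result, result, tag
      for (int offset = size - kPointerSize; offset >= 0; offset -= kPointerSize) {
        *(intptr_t*) (result + offset) = filler;  // sw scratch2, [result + offset]
      }
      // The generated code retags result afterwards; a C pointer needs no retag.
    }

    int main() {
      intptr_t object[4] = { 0, 0, 0, 0 };
      PrefillWithFillerMips((uint8_t*) object + kHeapObjectTag,
                            (int) sizeof(object), 0x2A);
      return object[0] == 0x2A ? 0 : 1;
    }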
 
 
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index 1cba8cf..f330ee7 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -284,9 +284,6 @@
                     LEnvironment* environment,
                     Register src1 = zero_reg,
                     const Operand& src2 = Operand(zero_reg));
-  void SoftDeoptimize(LEnvironment* environment,
-                      Register src1 = zero_reg,
-                      const Operand& src2 = Operand(zero_reg));
 
   void AddToTranslation(Translation* translation,
                         LOperand* op,
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index c64533c..553dd49 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -706,11 +706,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
-  return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
 LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
   return AssignEnvironment(new(zone()) LDeoptimize);
 }
@@ -1956,9 +1951,14 @@
 
 
 LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
-  LUnallocated* temp1 = TempRegister();
-  LOperand* temp2 = TempRegister();
+  LUnallocated* temp1 = NULL;
+  LOperand* temp2 = NULL;
+  if (!instr->CanOmitPrototypeChecks()) {
+    temp1 = TempRegister();
+    temp2 = TempRegister();
+  }
   LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
+  if (instr->CanOmitPrototypeChecks()) return result;
   return AssignEnvironment(result);
 }
 
@@ -1970,8 +1970,10 @@
 
 
 LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* value = NULL;
+  if (!instr->CanOmitMapChecks()) value = UseRegisterAtStart(instr->value());
   LInstruction* result = new(zone()) LCheckMaps(value);
+  if (instr->CanOmitMapChecks()) return result;
   return AssignEnvironment(result);
 }
 
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index 83a37c6..574a836 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -40,12 +40,6 @@
 // Forward declarations.
 class LCodeGen;
 
-#define LITHIUM_ALL_INSTRUCTION_LIST(V)         \
-  V(ControlInstruction)                         \
-  V(Call)                                       \
-  LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
 #define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
   V(AccessArgumentsAt)                          \
   V(AddI)                                       \
@@ -72,6 +66,7 @@
   V(CheckFunction)                              \
   V(CheckInstanceType)                          \
   V(CheckMaps)                                  \
+  V(CheckMapValue)                              \
   V(CheckNonSmi)                                \
   V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
@@ -89,14 +84,18 @@
   V(ConstantS)                                  \
   V(ConstantT)                                  \
   V(Context)                                    \
+  V(DateField)                                  \
   V(DebugBreak)                                 \
   V(DeclareGlobals)                             \
   V(Deoptimize)                                 \
   V(DivI)                                       \
   V(DoubleToI)                                  \
   V(DoubleToSmi)                                \
+  V(Drop)                                       \
   V(DummyUse)                                   \
   V(ElementsKind)                               \
+  V(ForInCacheArray)                            \
+  V(ForInPrepareMap)                            \
   V(FunctionLiteral)                            \
   V(GetCachedArrayIndex)                        \
   V(GlobalObject)                               \
@@ -104,13 +103,13 @@
   V(Goto)                                       \
   V(HasCachedArrayIndexAndBranch)               \
   V(HasInstanceTypeAndBranch)                   \
+  V(InnerAllocatedObject)                       \
   V(InstanceOf)                                 \
   V(InstanceOfKnownGlobal)                      \
   V(InstanceSize)                               \
   V(InstructionGap)                             \
   V(Integer32ToDouble)                          \
   V(Integer32ToSmi)                             \
-  V(Uint32ToDouble)                             \
   V(InvokeFunction)                             \
   V(IsConstructCallAndBranch)                   \
   V(IsObjectAndBranch)                          \
@@ -123,6 +122,7 @@
   V(LinkObjectInList)                           \
   V(LoadContextSlot)                            \
   V(LoadExternalArrayPointer)                   \
+  V(LoadFieldByIndex)                           \
   V(LoadFunctionPrototype)                      \
   V(LoadGlobalCell)                             \
   V(LoadGlobalGeneric)                          \
@@ -185,17 +185,10 @@
   V(TrapAllocationMemento)                      \
   V(Typeof)                                     \
   V(TypeofIsAndBranch)                          \
+  V(Uint32ToDouble)                             \
   V(UnknownOSRValue)                            \
   V(ValueOf)                                    \
-  V(ForInPrepareMap)                            \
-  V(ForInCacheArray)                            \
-  V(CheckMapValue)                              \
-  V(LoadFieldByIndex)                           \
-  V(DateField)                                  \
-  V(WrapReceiver)                               \
-  V(Drop)                                       \
-  V(InnerAllocatedObject)
-
+  V(WrapReceiver)
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
   virtual Opcode opcode() const { return LInstruction::k##type; } \
@@ -431,6 +424,7 @@
 class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+  DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
 };
 
 
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 8a44185..ea08a55 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -2882,6 +2882,7 @@
                               Register scratch2,
                               Label* gc_required,
                               AllocationFlags flags) {
+  ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -4968,9 +4969,10 @@
     Register scratch1,
     Register scratch2,
     Label* failure) {
-  int kFlatAsciiStringMask =
+  const int kFlatAsciiStringMask =
       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+  const int kFlatAsciiStringTag =
+      kStringTag | kOneByteStringTag | kSeqStringTag;
   ASSERT(kFlatAsciiStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
   andi(scratch1, first, kFlatAsciiStringMask);
   Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
@@ -4982,9 +4984,10 @@
 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                                             Register scratch,
                                                             Label* failure) {
-  int kFlatAsciiStringMask =
+  const int kFlatAsciiStringMask =
       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+  const int kFlatAsciiStringTag =
+      kStringTag | kOneByteStringTag | kSeqStringTag;
   And(scratch, type, Operand(kFlatAsciiStringMask));
   Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
 }
diff --git a/src/objects-inl.h b/src/objects-inl.h
index c12a12a..88ac91c 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -3669,6 +3669,12 @@
 }
 
 
+bool Map::CanOmitMapChecks() {
+  return !HasTransitionArray() && !is_dictionary_map() &&
+         FLAG_omit_map_checks_for_leaf_maps;
+}
+
+
 int DependentCode::number_of_entries(DependencyGroup group) {
   if (length() == 0) return 0;
   return Smi::cast(get(group))->value();
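
Map::CanOmitMapChecks is the predicate behind the CanOmitMapChecks() bailouts above: only leaf maps (no transition array) outside dictionary mode qualify, and only when --omit_map_checks_for_leaf_maps is on. How compiled code gets invalidated if the map later gains a transition is not shown in this hunk. A sketch of the predicate with a stand-in Map:

    struct Map {
      bool has_transition_array;
      bool is_dictionary_map;
    };

    bool flag_omit_map_checks_for_leaf_maps = true;

    bool CanOmitMapChecks(const Map& map) {
      return !map.has_transition_array &&  // a leaf: no map to transition to
             !map.is_dictionary_map &&     // dictionary maps mutate freely
             flag_omit_map_checks_for_leaf_maps;
    }

    int main() {
      Map leaf = { false, false };
      return CanOmitMapChecks(leaf) ? 0 : 1;
    }
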
diff --git a/src/objects.cc b/src/objects.cc
index 1967b13..2ecc57b 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -15918,10 +15918,9 @@
                                Handle<Object> value) {
   Isolate* isolate = cell->GetIsolate();
   Handle<Type> old_type(cell->type(), isolate);
-  Handle<Type> new_type((value->IsSmi() || value->IsJSFunction() ||
-                         value->IsUndefined())
-                        ? Type::Constant(value, isolate)
-                        : Type::Any(), isolate);
+  Handle<Type> new_type(value->IsConsString() || value->IsTheHole()
+                        ? Type::Any()
+                        : Type::Constant(value, isolate), isolate);
 
   if (new_type->Is(old_type)) {
     return *old_type;
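
Property-cell type tracking flips from an allow-list (smi, function, undefined) to a deny-list: every stored value now gets a constant type except cons strings and the hole. The exclusions are plausibly values whose identity is unstable (a cons string can be flattened in place) or sentinel. A sketch of the rule with hypothetical value kinds:

    enum Kind { kSmi, kFunction, kUndefined, kConsString, kTheHole, kOther };

    bool GetsConstantType(Kind k) {
      // Old rule: only kSmi, kFunction, kUndefined qualified.
      // New rule: everything except the two excluded kinds.
      return k != kConsString && k != kTheHole;
    }

    int main() {
      return GetsConstantType(kOther) && !GetsConstantType(kTheHole) ? 0 : 1;
    }
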
diff --git a/src/objects.h b/src/objects.h
index f197b23..a567722 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -357,7 +357,6 @@
   V(ODDBALL_TYPE)                                                              \
   V(CELL_TYPE)                                                                 \
   V(PROPERTY_CELL_TYPE)                                                        \
-  V(BOX_TYPE)                                                                  \
                                                                                \
   V(HEAP_NUMBER_TYPE)                                                          \
   V(FOREIGN_TYPE)                                                              \
@@ -395,6 +394,7 @@
   V(POLYMORPHIC_CODE_CACHE_TYPE)                                               \
   V(TYPE_FEEDBACK_INFO_TYPE)                                                   \
   V(ALIASED_ARGUMENTS_ENTRY_TYPE)                                              \
+  V(BOX_TYPE)                                                                  \
                                                                                \
   V(FIXED_ARRAY_TYPE)                                                          \
   V(FIXED_DOUBLE_ARRAY_TYPE)                                                   \
@@ -699,7 +699,6 @@
   ODDBALL_TYPE,
   CELL_TYPE,
   PROPERTY_CELL_TYPE,
-  BOX_TYPE,
 
   // "Data", objects that cannot contain non-map-word pointers to heap
   // objects.
@@ -738,6 +737,7 @@
   POLYMORPHIC_CODE_CACHE_TYPE,
   TYPE_FEEDBACK_INFO_TYPE,
   ALIASED_ARGUMENTS_ENTRY_TYPE,
+  BOX_TYPE,
   // The following two instance types are only used when ENABLE_DEBUGGER_SUPPORT
   // is defined. However as include/v8.h contain some of the instance type
   // constants always having them avoids them getting different numbers
@@ -5626,6 +5626,7 @@
   inline void NotifyLeafMapLayoutChange();
 
   inline bool CanOmitPrototypeChecks();
+  inline bool CanOmitMapChecks();
 
   void AddDependentCompilationInfo(DependentCode::DependencyGroup group,
                                    CompilationInfo* info);
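
BOX_TYPE moves out of the "data" section of the instance-type enum into the struct-like group near the end. Because the enum is dense, every enumerator after the old slot shifts down by one, so any hard-coded mirror of these values elsewhere must be updated in the same change. A toy illustration:

    #include <assert.h>

    enum Before { A0, BOX_BEFORE, FOREIGN_BEFORE };  // FOREIGN == 2
    enum After  { A1, FOREIGN_AFTER, BOX_AFTER };    // FOREIGN == 1

    int main() {
      assert(FOREIGN_BEFORE == 2 && FOREIGN_AFTER == 1);  // shifted by one
      return 0;
    }
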
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 51321c7..c34330b 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -51,9 +51,6 @@
 namespace v8 {
 namespace internal {
 
-// 0 is never a valid thread id
-static const pthread_t kNoThread = (pthread_t) 0;
-
 
 double ceiling(double x) {
   return ceil(x);
@@ -63,11 +60,6 @@
 static Mutex* limit_mutex = NULL;
 
 
-void OS::PostSetUp() {
-  POSIXPostSetUp();
-}
-
-
 uint64_t OS::CpuFeaturesImpliedByPlatform() {
   return 0;  // Nothing special about Cygwin.
 }
@@ -126,11 +118,6 @@
 }
 
 
-size_t OS::AllocateAlignment() {
-  return sysconf(_SC_PAGESIZE);
-}
-
-
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
@@ -147,48 +134,6 @@
 }
 
 
-void OS::Free(void* address, const size_t size) {
-  // TODO(1240712): munmap has a return value which is ignored here.
-  int result = munmap(address, size);
-  USE(result);
-  ASSERT(result == 0);
-}
-
-
-void OS::ProtectCode(void* address, const size_t size) {
-  DWORD old_protect;
-  VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
-}
-
-
-void OS::Guard(void* address, const size_t size) {
-  DWORD oldprotect;
-  VirtualProtect(address, size, PAGE_READONLY | PAGE_GUARD, &oldprotect);
-}
-
-
-void OS::Sleep(int milliseconds) {
-  unsigned int ms = static_cast<unsigned int>(milliseconds);
-  usleep(1000 * ms);
-}
-
-
-int OS::NumberOfCores() {
-  return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
-void OS::Abort() {
-  // Redirect to std abort to signal abnormal program termination.
-  abort();
-}
-
-
-void OS::DebugBreak() {
-  asm("int $3");
-}
-
-
 void OS::DumpBacktrace() {
   // Currently unsupported.
 }
@@ -470,110 +415,6 @@
 }
 
 
-class Thread::PlatformData : public Malloced {
- public:
-  PlatformData() : thread_(kNoThread) {}
-  pthread_t thread_;  // Thread handle for pthread.
-};
-
-
-Thread::Thread(const Options& options)
-    : data_(new PlatformData()),
-      stack_size_(options.stack_size()),
-      start_semaphore_(NULL) {
-  set_name(options.name());
-}
-
-
-Thread::~Thread() {
-  delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
-  Thread* thread = reinterpret_cast<Thread*>(arg);
-  // This is also initialized by the first argument to pthread_create() but we
-  // don't know which thread will run first (the original thread or the new
-  // one) so we initialize it here too.
-  thread->data()->thread_ = pthread_self();
-  ASSERT(thread->data()->thread_ != kNoThread);
-  thread->NotifyStartedAndRun();
-  return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
-  strncpy(name_, name, sizeof(name_));
-  name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
-  pthread_attr_t* attr_ptr = NULL;
-  pthread_attr_t attr;
-  if (stack_size_ > 0) {
-    pthread_attr_init(&attr);
-    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
-    attr_ptr = &attr;
-  }
-  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
-  ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
-  pthread_join(data_->thread_, NULL);
-}
-
-
-static inline Thread::LocalStorageKey PthreadKeyToLocalKey(
-    pthread_key_t pthread_key) {
-  // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
-  // because pthread_key_t is a pointer type on Cygwin. This will probably not
-  // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
-  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
-  intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
-  return static_cast<Thread::LocalStorageKey>(ptr_key);
-}
-
-
-static inline pthread_key_t LocalKeyToPthreadKey(
-    Thread::LocalStorageKey local_key) {
-  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
-  intptr_t ptr_key = static_cast<intptr_t>(local_key);
-  return reinterpret_cast<pthread_key_t>(ptr_key);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
-  pthread_key_t key;
-  int result = pthread_key_create(&key, NULL);
-  USE(result);
-  ASSERT(result == 0);
-  return PthreadKeyToLocalKey(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
-  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
-  int result = pthread_key_delete(pthread_key);
-  USE(result);
-  ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
-  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
-  return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
-  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
-  pthread_setspecific(pthread_key, value);
-}
-
-
 class CygwinSemaphore : public Semaphore {
  public:
   explicit CygwinSemaphore(int count) {  sem_init(&sem_, 0, count); }
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index c771cd3..d7544db 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -62,10 +62,6 @@
 namespace v8 {
 namespace internal {
 
-// 0 is never a valid thread id on FreeBSD since tids and pids share a
-// name space and pid 0 is used to kill the group (see man 2 kill).
-static const pthread_t kNoThread = (pthread_t) 0;
-
 
 double ceiling(double x) {
     // Correct as on OS X
@@ -80,11 +76,6 @@
 static Mutex* limit_mutex = NULL;
 
 
-void OS::PostSetUp() {
-  POSIXPostSetUp();
-}
-
-
 uint64_t OS::CpuFeaturesImpliedByPlatform() {
   return 0;  // FreeBSD runs on anything.
 }
@@ -139,11 +130,6 @@
 }
 
 
-size_t OS::AllocateAlignment() {
-  return getpagesize();
-}
-
-
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool executable) {
@@ -161,40 +147,6 @@
 }
 
 
-void OS::Free(void* buf, const size_t length) {
-  // TODO(1240712): munmap has a return value which is ignored here.
-  int result = munmap(buf, length);
-  USE(result);
-  ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
-  unsigned int ms = static_cast<unsigned int>(milliseconds);
-  usleep(1000 * ms);
-}
-
-
-int OS::NumberOfCores() {
-  return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
-void OS::Abort() {
-  // Redirect to std abort to signal abnormal program termination.
-  abort();
-}
-
-
-void OS::DebugBreak() {
-#if (defined(__arm__) || defined(__thumb__))
-  asm("bkpt 0");
-#else
-  asm("int $3");
-#endif
-}
-
-
 void OS::DumpBacktrace() {
   POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
 }
@@ -441,90 +393,6 @@
 }
 
 
-class Thread::PlatformData : public Malloced {
- public:
-  pthread_t thread_;  // Thread handle for pthread.
-};
-
-
-Thread::Thread(const Options& options)
-    : data_(new PlatformData),
-      stack_size_(options.stack_size()),
-      start_semaphore_(NULL) {
-  set_name(options.name());
-}
-
-
-Thread::~Thread() {
-  delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
-  Thread* thread = reinterpret_cast<Thread*>(arg);
-  // This is also initialized by the first argument to pthread_create() but we
-  // don't know which thread will run first (the original thread or the new
-  // one) so we initialize it here too.
-  thread->data()->thread_ = pthread_self();
-  ASSERT(thread->data()->thread_ != kNoThread);
-  thread->NotifyStartedAndRun();
-  return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
-  strncpy(name_, name, sizeof(name_));
-  name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
-  pthread_attr_t* attr_ptr = NULL;
-  pthread_attr_t attr;
-  if (stack_size_ > 0) {
-    pthread_attr_init(&attr);
-    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
-    attr_ptr = &attr;
-  }
-  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
-  ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
-  pthread_join(data_->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
-  pthread_key_t key;
-  int result = pthread_key_create(&key, NULL);
-  USE(result);
-  ASSERT(result == 0);
-  return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  int result = pthread_key_delete(pthread_key);
-  USE(result);
-  ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  pthread_setspecific(pthread_key, value);
-}
-
-
 class FreeBSDSemaphore : public Semaphore {
  public:
   explicit FreeBSDSemaphore(int count) {  sem_init(&sem_, 0, count); }
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 613d243..245dae4 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -75,10 +75,6 @@
 namespace v8 {
 namespace internal {
 
-// 0 is never a valid thread id on Linux since tids and pids share a
-// name space and pid 0 is reserved (see man 2 kill).
-static const pthread_t kNoThread = (pthread_t) 0;
-
 
 double ceiling(double x) {
   return ceil(x);
@@ -88,11 +84,6 @@
 static Mutex* limit_mutex = NULL;
 
 
-void OS::PostSetUp() {
-  POSIXPostSetUp();
-}
-
-
 uint64_t OS::CpuFeaturesImpliedByPlatform() {
   return 0;  // Linux runs on anything.
 }
@@ -384,11 +375,6 @@
 }
 
 
-size_t OS::AllocateAlignment() {
-  return sysconf(_SC_PAGESIZE);
-}
-
-
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
@@ -407,49 +393,6 @@
 }
 
 
-void OS::Free(void* address, const size_t size) {
-  // TODO(1240712): munmap has a return value which is ignored here.
-  int result = munmap(address, size);
-  USE(result);
-  ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
-  unsigned int ms = static_cast<unsigned int>(milliseconds);
-  usleep(1000 * ms);
-}
-
-
-int OS::NumberOfCores() {
-  return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
-void OS::Abort() {
-  // Redirect to std abort to signal abnormal program termination.
-  if (FLAG_break_on_abort) {
-    DebugBreak();
-  }
-  abort();
-}
-
-
-void OS::DebugBreak() {
-// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
-//  which is the architecture of generated code).
-#if (defined(__arm__) || defined(__thumb__))
-  asm("bkpt 0");
-#elif defined(__mips__)
-  asm("break");
-#elif defined(__native_client__)
-  asm("hlt");
-#else
-  asm("int $3");
-#endif
-}
-
-
 void OS::DumpBacktrace() {
   // backtrace is a glibc extension.
 #if defined(__GLIBC__) && !defined(__UCLIBC__)
@@ -764,101 +707,6 @@
 }
 
 
-class Thread::PlatformData : public Malloced {
- public:
-  PlatformData() : thread_(kNoThread) {}
-
-  pthread_t thread_;  // Thread handle for pthread.
-};
-
-Thread::Thread(const Options& options)
-    : data_(new PlatformData()),
-      stack_size_(options.stack_size()),
-      start_semaphore_(NULL) {
-  set_name(options.name());
-}
-
-
-Thread::~Thread() {
-  delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
-  Thread* thread = reinterpret_cast<Thread*>(arg);
-  // This is also initialized by the first argument to pthread_create() but we
-  // don't know which thread will run first (the original thread or the new
-  // one) so we initialize it here too.
-#ifdef PR_SET_NAME
-  prctl(PR_SET_NAME,
-        reinterpret_cast<unsigned long>(thread->name()),  // NOLINT
-        0, 0, 0);
-#endif
-  thread->data()->thread_ = pthread_self();
-  ASSERT(thread->data()->thread_ != kNoThread);
-  thread->NotifyStartedAndRun();
-  return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
-  strncpy(name_, name, sizeof(name_));
-  name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
-  pthread_attr_t* attr_ptr = NULL;
-#if defined(__native_client__)
-  // use default stack size.
-#else
-  pthread_attr_t attr;
-  if (stack_size_ > 0) {
-    pthread_attr_init(&attr);
-    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
-    attr_ptr = &attr;
-  }
-#endif
-  int result = pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
-  CHECK_EQ(0, result);
-  ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
-  pthread_join(data_->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
-  pthread_key_t key;
-  int result = pthread_key_create(&key, NULL);
-  USE(result);
-  ASSERT(result == 0);
-  return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  int result = pthread_key_delete(pthread_key);
-  USE(result);
-  ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  pthread_setspecific(pthread_key, value);
-}
-
-
 class LinuxSemaphore : public Semaphore {
  public:
   explicit LinuxSemaphore(int count) {  sem_init(&sem_, 0, count); }
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 097691b..12a3e66 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -78,10 +78,6 @@
 namespace v8 {
 namespace internal {
 
-// 0 is never a valid thread id on MacOSX since a pthread_t is
-// a pointer.
-static const pthread_t kNoThread = (pthread_t) 0;
-
 
 double ceiling(double x) {
   // Correct Mac OS X Leopard 'ceil' behavior.
@@ -96,11 +92,6 @@
 static Mutex* limit_mutex = NULL;
 
 
-void OS::PostSetUp() {
-  POSIXPostSetUp();
-}
-
-
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
 // and verification).  The estimate is conservative, i.e., not all addresses in
@@ -126,11 +117,6 @@
 }
 
 
-size_t OS::AllocateAlignment() {
-  return getpagesize();
-}
-
-
 // Constants used for mmap.
 // kMmapFd is used to pass vm_alloc flags to tag the region with the user
 // defined tag 255. This helps identify V8-allocated regions in memory analysis
@@ -160,35 +146,6 @@
 }
 
 
-void OS::Free(void* address, const size_t size) {
-  // TODO(1240712): munmap has a return value which is ignored here.
-  int result = munmap(address, size);
-  USE(result);
-  ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
-  usleep(1000 * milliseconds);
-}
-
-
-int OS::NumberOfCores() {
-  return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
-void OS::Abort() {
-  // Redirect to std abort to signal abnormal program termination
-  abort();
-}
-
-
-void OS::DebugBreak() {
-  asm("int $3");
-}
-
-
 void OS::DumpBacktrace() {
   // If weak link to execinfo lib has failed, i.e. because we are on 10.4, abort.
   if (backtrace == NULL) return;
@@ -460,177 +417,6 @@
 }
 
 
-class Thread::PlatformData : public Malloced {
- public:
-  PlatformData() : thread_(kNoThread) {}
-  pthread_t thread_;  // Thread handle for pthread.
-};
-
-
-Thread::Thread(const Options& options)
-    : data_(new PlatformData),
-      stack_size_(options.stack_size()),
-      start_semaphore_(NULL) {
-  set_name(options.name());
-}
-
-
-Thread::~Thread() {
-  delete data_;
-}
-
-
-static void SetThreadName(const char* name) {
-  // pthread_setname_np is only available in 10.6 or later, so test
-  // for it at runtime.
-  int (*dynamic_pthread_setname_np)(const char*);
-  *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
-    dlsym(RTLD_DEFAULT, "pthread_setname_np");
-  if (!dynamic_pthread_setname_np)
-    return;
-
-  // Mac OS X does not expose the length limit of the name, so hardcode it.
-  static const int kMaxNameLength = 63;
-  USE(kMaxNameLength);
-  ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
-  dynamic_pthread_setname_np(name);
-}
-
-
-static void* ThreadEntry(void* arg) {
-  Thread* thread = reinterpret_cast<Thread*>(arg);
-  // This is also initialized by the first argument to pthread_create() but we
-  // don't know which thread will run first (the original thread or the new
-  // one) so we initialize it here too.
-  thread->data()->thread_ = pthread_self();
-  SetThreadName(thread->name());
-  ASSERT(thread->data()->thread_ != kNoThread);
-  thread->NotifyStartedAndRun();
-  return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
-  strncpy(name_, name, sizeof(name_));
-  name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
-  pthread_attr_t* attr_ptr = NULL;
-  pthread_attr_t attr;
-  if (stack_size_ > 0) {
-    pthread_attr_init(&attr);
-    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
-    attr_ptr = &attr;
-  }
-  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
-  ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
-  pthread_join(data_->thread_, NULL);
-}
-
-
-#ifdef V8_FAST_TLS_SUPPORTED
-
-static Atomic32 tls_base_offset_initialized = 0;
-intptr_t kMacTlsBaseOffset = 0;
-
-// It's safe to do the initialization more than once, but it has to be
-// done at least once.
-static void InitializeTlsBaseOffset() {
-  const size_t kBufferSize = 128;
-  char buffer[kBufferSize];
-  size_t buffer_size = kBufferSize;
-  int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
-  if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
-    V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
-  }
-  // The buffer now contains a string of the form XX.YY.ZZ, where
-  // XX is the major kernel version component.
-  // Make sure the buffer is 0-terminated.
-  buffer[kBufferSize - 1] = '\0';
-  char* period_pos = strchr(buffer, '.');
-  *period_pos = '\0';
-  int kernel_version_major =
-      static_cast<int>(strtol(buffer, NULL, 10));  // NOLINT
-  // The constants below are taken from pthreads.s from the XNU kernel
-  // sources archive at www.opensource.apple.com.
-  if (kernel_version_major < 11) {
-    // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
-    // same offsets.
-#if V8_HOST_ARCH_IA32
-    kMacTlsBaseOffset = 0x48;
-#else
-    kMacTlsBaseOffset = 0x60;
-#endif
-  } else {
-    // 11.x.x (Lion) changed the offset.
-    kMacTlsBaseOffset = 0;
-  }
-
-  Release_Store(&tls_base_offset_initialized, 1);
-}
-
-
-static void CheckFastTls(Thread::LocalStorageKey key) {
-  void* expected = reinterpret_cast<void*>(0x1234CAFE);
-  Thread::SetThreadLocal(key, expected);
-  void* actual = Thread::GetExistingThreadLocal(key);
-  if (expected != actual) {
-    V8_Fatal(__FILE__, __LINE__,
-             "V8 failed to initialize fast TLS on current kernel");
-  }
-  Thread::SetThreadLocal(key, NULL);
-}
-
-#endif  // V8_FAST_TLS_SUPPORTED
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
-#ifdef V8_FAST_TLS_SUPPORTED
-  bool check_fast_tls = false;
-  if (tls_base_offset_initialized == 0) {
-    check_fast_tls = true;
-    InitializeTlsBaseOffset();
-  }
-#endif
-  pthread_key_t key;
-  int result = pthread_key_create(&key, NULL);
-  USE(result);
-  ASSERT(result == 0);
-  LocalStorageKey typed_key = static_cast<LocalStorageKey>(key);
-#ifdef V8_FAST_TLS_SUPPORTED
-  // If we just initialized fast TLS support, make sure it works.
-  if (check_fast_tls) CheckFastTls(typed_key);
-#endif
-  return typed_key;
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  int result = pthread_key_delete(pthread_key);
-  USE(result);
-  ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  pthread_setspecific(pthread_key, value);
-}
-
-
 class MacOSSemaphore : public Semaphore {
  public:
   explicit MacOSSemaphore(int count) {
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index a40df48..4340094 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -100,11 +100,6 @@
 }
 
 
-void OS::PostSetUp() {
-  POSIXPostSetUp();
-}
-
-
 uint64_t OS::CpuFeaturesImpliedByPlatform() {
   return 0;
 }
@@ -160,11 +155,6 @@
 }
 
 
-size_t OS::AllocateAlignment() {
-  return sysconf(_SC_PAGESIZE);
-}
-
-
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
@@ -183,36 +173,6 @@
 }
 
 
-void OS::Free(void* address, const size_t size) {
-  // TODO(1240712): munmap has a return value which is ignored here.
-  int result = munmap(address, size);
-  USE(result);
-  ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
-  unsigned int ms = static_cast<unsigned int>(milliseconds);
-  usleep(1000 * ms);
-}
-
-
-int OS::NumberOfCores() {
-  return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
-void OS::Abort() {
-  // Redirect to std abort to signal abnormal program termination.
-  abort();
-}
-
-
-void OS::DebugBreak() {
-  asm("int $3");
-}
-
-
 void OS::DumpBacktrace() {
   // Currently unsupported.
 }
@@ -517,96 +477,6 @@
 }
 
 
-class Thread::PlatformData : public Malloced {
- public:
-  PlatformData() : thread_(kNoThread) {}
-
-  pthread_t thread_;  // Thread handle for pthread.
-};
-
-Thread::Thread(const Options& options)
-    : data_(new PlatformData()),
-      stack_size_(options.stack_size()),
-      start_semaphore_(NULL) {
-  set_name(options.name());
-}
-
-
-Thread::~Thread() {
-  delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
-  Thread* thread = reinterpret_cast<Thread*>(arg);
-  // This is also initialized by the first argument to pthread_create() but we
-  // don't know which thread will run first (the original thread or the new
-  // one) so we initialize it here too.
-#ifdef PR_SET_NAME
-  prctl(PR_SET_NAME,
-        reinterpret_cast<unsigned long>(thread->name()),  // NOLINT
-        0, 0, 0);
-#endif
-  thread->data()->thread_ = pthread_self();
-  ASSERT(thread->data()->thread_ != kNoThread);
-  thread->NotifyStartedAndRun();
-  return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
-  strncpy(name_, name, sizeof(name_));
-  name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
-  pthread_attr_t* attr_ptr = NULL;
-  pthread_attr_t attr;
-  if (stack_size_ > 0) {
-    pthread_attr_init(&attr);
-    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
-    attr_ptr = &attr;
-  }
-  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
-  ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
-  pthread_join(data_->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
-  pthread_key_t key;
-  int result = pthread_key_create(&key, NULL);
-  USE(result);
-  ASSERT(result == 0);
-  return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  int result = pthread_key_delete(pthread_key);
-  USE(result);
-  ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  pthread_setspecific(pthread_key, value);
-}
-
-
 class OpenBSDSemaphore : public Semaphore {
  public:
   explicit OpenBSDSemaphore(int count) {  sem_init(&sem_, 0, count); }
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 9d3d769..ff5f70a 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -31,7 +31,11 @@
 
 #include "platform-posix.h"
 
+#include <dlfcn.h>
 #include <pthread.h>
+#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
+#include <pthread_np.h>  // for pthread_set_name_np
+#endif
 #include <sched.h>  // for sched_yield
 #include <unistd.h>
 #include <errno.h>
@@ -43,6 +47,13 @@
 #include <sys/time.h>
 #include <sys/types.h>
 #include <sys/stat.h>
+#if defined(__linux__)
+#include <sys/prctl.h>  // for prctl
+#endif
+#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
+    defined(__NetBSD__) || defined(__OpenBSD__)
+#include <sys/sysctl.h>  // for sysctl
+#endif
 
 #include <arpa/inet.h>
 #include <netinet/in.h>
@@ -63,6 +74,9 @@
 namespace v8 {
 namespace internal {
 
+// 0 is never a valid thread id.
+static const pthread_t kNoThread = (pthread_t) 0;
+
 
 // Maximum size of the virtual memory.  0 means there is no artificial
 // limit.
@@ -81,10 +95,20 @@
 }
 
 
-#ifndef __CYGWIN__
+void OS::Free(void* address, const size_t size) {
+  // TODO(1240712): munmap has a return value which is ignored here.
+  int result = munmap(address, size);
+  USE(result);
+  ASSERT(result == 0);
+}
+
+
 // Get rid of writable permission on code allocations.
 void OS::ProtectCode(void* address, const size_t size) {
-#if defined(__native_client__)
+#if defined(__CYGWIN__)
+  DWORD old_protect;
+  VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
+#elif defined(__native_client__)
   // The Native Client port of V8 uses an interpreter, so
   // code pages don't need PROT_EXEC.
   mprotect(address, size, PROT_READ);
@@ -96,9 +120,13 @@
 
 // Create guard pages.
 void OS::Guard(void* address, const size_t size) {
+#if defined(__CYGWIN__)
+  DWORD oldprotect;
+  VirtualProtect(address, size, PAGE_READONLY | PAGE_GUARD, &oldprotect);
+#else
   mprotect(address, size, PROT_NONE);
+#endif
 }
-#endif  // __CYGWIN__
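
For reference, a minimal standalone sketch of the ProtectCode pattern consolidated above, assuming a POSIX host with mmap/mprotect (the 0xC3 fill byte, x86 "ret", is purely illustrative): fill the page while it is writable, then drop PROT_WRITE.

```cpp
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  void* mem = mmap(NULL, page, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED) return 1;
  memset(mem, 0xC3, page);  // Fill while the page is still writable.
  // Get rid of writable permission, as OS::ProtectCode does for code pages.
  if (mprotect(mem, page, PROT_READ | PROT_EXEC) != 0) return 1;
  printf("code page sealed at %p\n", mem);
  munmap(mem, page);
  return 0;
}
```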
 
 
 void* OS::GetRandomMmapAddr() {
@@ -135,6 +163,50 @@
 }
 
 
+size_t OS::AllocateAlignment() {
+  return getpagesize();
+}
+
+
+void OS::Sleep(int milliseconds) {
+  useconds_t ms = static_cast<useconds_t>(milliseconds);
+  usleep(1000 * ms);
+}
+
+
+int OS::NumberOfCores() {
+  return sysconf(_SC_NPROCESSORS_ONLN);
+}
+
+
+void OS::Abort() {
+  // Redirect to std abort to signal abnormal program termination.
+  if (FLAG_break_on_abort) {
+    DebugBreak();
+  }
+  abort();
+}
+
+
+void OS::DebugBreak() {
+#if V8_HOST_ARCH_ARM
+  asm("bkpt 0");
+#elif V8_HOST_ARCH_MIPS
+  asm("break");
+#elif V8_HOST_ARCH_IA32
+#if defined(__native_client__)
+  asm("hlt");
+#else
+  asm("int $3");
+#endif  // __native_client__
+#elif V8_HOST_ARCH_X64
+  asm("int $3");
+#else
+#error Unsupported host architecture.
+#endif
+}
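
The int $3 / bkpt instructions emitted above raise SIGTRAP, which a debugger intercepts as a breakpoint. A small demo, assuming a Unix host (the inline asm branch is x86-only; other hosts fall back to raise()):

```cpp
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void OnTrap(int sig) {
  printf("caught SIGTRAP (%d)\n", sig);
  exit(0);
}

int main() {
  signal(SIGTRAP, OnTrap);
#if defined(__i386__) || defined(__x86_64__)
  asm("int $3");  // Same instruction OS::DebugBreak emits on ia32/x64.
#else
  raise(SIGTRAP);
#endif
  return 1;  // Not reached once the handler fires.
}
```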
+
+
 // ----------------------------------------------------------------------------
 // Math functions
 
@@ -159,7 +231,7 @@
 UNARY_MATH_FUNCTION(exp, CreateExpFunction())
 UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
 
-#undef MATH_FUNCTION
+#undef UNARY_MATH_FUNCTION
 
 
 void lazily_initialize_fast_exp() {
@@ -371,7 +443,7 @@
 #endif
 
 
-void POSIXPostSetUp() {
+void OS::PostSetUp() {
 #if V8_TARGET_ARCH_IA32
   OS::MemMoveFunction generated_memmove = CreateMemMoveFunction();
   if (generated_memmove != NULL) {
@@ -410,8 +482,229 @@
 // POSIX thread support.
 //
 
+class Thread::PlatformData : public Malloced {
+ public:
+  PlatformData() : thread_(kNoThread) {}
+  pthread_t thread_;  // Thread handle for pthread.
+};
+
+Thread::Thread(const Options& options)
+    : data_(new PlatformData),
+      stack_size_(options.stack_size()),
+      start_semaphore_(NULL) {
+  set_name(options.name());
+}
+
+
+Thread::~Thread() {
+  delete data_;
+}
+
+
+static void SetThreadName(const char* name) {
+  int result = 0;
+#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
+  result = pthread_set_name_np(pthread_self(), name);
+#elif defined(__NetBSD__)
+  STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
+  result = pthread_setname_np(pthread_self(), "%s", name);
+#elif defined(__APPLE__)
+  // pthread_setname_np is only available in 10.6 or later, so test
+  // for it at runtime.
+  int (*dynamic_pthread_setname_np)(const char*);
+  *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
+    dlsym(RTLD_DEFAULT, "pthread_setname_np");
+  if (dynamic_pthread_setname_np == NULL)
+    return;
+
+  // Mac OS X does not expose the length limit of the name, so hardcode it.
+  static const int kMaxNameLength = 63;
+  STATIC_ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
+  result = dynamic_pthread_setname_np(name);
+#elif defined(PR_SET_NAME)
+  result = prctl(PR_SET_NAME,
+                 reinterpret_cast<unsigned long>(name),  // NOLINT
+                 0, 0, 0);
+#endif
+  ASSERT_EQ(0, result);
+  USE(result);
+}
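
The dlsym lookup above avoids a hard link-time dependency on a symbol that only exists on newer OS releases. A standalone sketch of the same pattern (the single-argument signature mirrors the Mac variant; Linux's pthread_setname_np takes a pthread_t as well):

```cpp
#include <dlfcn.h>
#include <stdio.h>

int main() {
  typedef int (*SetNameFunction)(const char*);
  SetNameFunction dynamic_setname = NULL;
  // Resolve at runtime; dlsym returns NULL when the libc lacks the symbol.
  *reinterpret_cast<void**>(&dynamic_setname) =
      dlsym(RTLD_DEFAULT, "pthread_setname_np");
  if (dynamic_setname == NULL) {
    printf("pthread_setname_np not provided by this system\n");
    return 0;
  }
  dynamic_setname("demo-thread");  // Only called when the symbol exists.
  return 0;
}
```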
+
+
+static void* ThreadEntry(void* arg) {
+  Thread* thread = reinterpret_cast<Thread*>(arg);
+  // This is also initialized by the first argument to pthread_create(), but
+  // we don't know which thread will run first (the original thread or the
+  // new one), so we initialize it here too.
+  thread->data()->thread_ = pthread_self();
+  SetThreadName(thread->name());
+  ASSERT(thread->data()->thread_ != kNoThread);
+  thread->NotifyStartedAndRun();
+  return NULL;
+}
+
+
+void Thread::set_name(const char* name) {
+  strncpy(name_, name, sizeof(name_));
+  name_[sizeof(name_) - 1] = '\0';
+}
+
+
+void Thread::Start() {
+  int result;
+  pthread_attr_t attr;
+  memset(&attr, 0, sizeof(attr));
+  result = pthread_attr_init(&attr);
+  ASSERT_EQ(0, result);
+  // Native Client uses the default stack size.
+#if !defined(__native_client__)
+  if (stack_size_ > 0) {
+    result = pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
+    ASSERT_EQ(0, result);
+  }
+#endif
+  result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
+  ASSERT_EQ(0, result);
+  result = pthread_attr_destroy(&attr);
+  ASSERT_EQ(0, result);
+  ASSERT(data_->thread_ != kNoThread);
+  USE(result);
+}
+
+
+void Thread::Join() {
+  pthread_join(data_->thread_, NULL);
+}
+
+
 void Thread::YieldCPU() {
-  sched_yield();
+  int result = sched_yield();
+  ASSERT_EQ(0, result);
+  USE(result);
+}
+
+
+static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
+#if defined(__CYGWIN__)
+  // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
+  // because pthread_key_t is a pointer type on Cygwin. This will probably not
+  // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
+  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
+  intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
+  return static_cast<Thread::LocalStorageKey>(ptr_key);
+#else
+  return static_cast<Thread::LocalStorageKey>(pthread_key);
+#endif
+}
+
+
+static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
+#if defined(__CYGWIN__)
+  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
+  intptr_t ptr_key = static_cast<intptr_t>(local_key);
+  return reinterpret_cast<pthread_key_t>(ptr_key);
+#else
+  return static_cast<pthread_key_t>(local_key);
+#endif
+}
+
+
+#ifdef V8_FAST_TLS_SUPPORTED
+
+static Atomic32 tls_base_offset_initialized = 0;
+intptr_t kMacTlsBaseOffset = 0;
+
+// It's safe to do the initialization more than once, but it has to be
+// done at least once.
+static void InitializeTlsBaseOffset() {
+  const size_t kBufferSize = 128;
+  char buffer[kBufferSize];
+  size_t buffer_size = kBufferSize;
+  int ctl_name[] = { CTL_KERN, KERN_OSRELEASE };
+  if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
+    V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
+  }
+  // The buffer now contains a string of the form XX.YY.ZZ, where
+  // XX is the major kernel version component.
+  // Make sure the buffer is 0-terminated.
+  buffer[kBufferSize - 1] = '\0';
+  char* period_pos = strchr(buffer, '.');
+  *period_pos = '\0';
+  int kernel_version_major =
+      static_cast<int>(strtol(buffer, NULL, 10));  // NOLINT
+  // The constants below are taken from pthreads.s from the XNU kernel
+  // sources archive at www.opensource.apple.com.
+  if (kernel_version_major < 11) {
+    // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
+    // same offsets.
+#if V8_HOST_ARCH_IA32
+    kMacTlsBaseOffset = 0x48;
+#else
+    kMacTlsBaseOffset = 0x60;
+#endif
+  } else {
+    // 11.x.x (Lion) changed the offset.
+    kMacTlsBaseOffset = 0;
+  }
+
+  Release_Store(&tls_base_offset_initialized, 1);
+}
+
+
+static void CheckFastTls(Thread::LocalStorageKey key) {
+  void* expected = reinterpret_cast<void*>(0x1234CAFE);
+  Thread::SetThreadLocal(key, expected);
+  void* actual = Thread::GetExistingThreadLocal(key);
+  if (expected != actual) {
+    V8_Fatal(__FILE__, __LINE__,
+             "V8 failed to initialize fast TLS on current kernel");
+  }
+  Thread::SetThreadLocal(key, NULL);
+}
+
+#endif  // V8_FAST_TLS_SUPPORTED
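
A standalone sketch of the kernel-version probe used by InitializeTlsBaseOffset, assuming a BSD-derived host where CTL_KERN/KERN_OSRELEASE are defined (V8 only takes this path on Mac OS X):

```cpp
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int main() {
  char buffer[128];
  size_t buffer_size = sizeof(buffer);
  int ctl_name[] = { CTL_KERN, KERN_OSRELEASE };
  if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) return 1;
  buffer[sizeof(buffer) - 1] = '\0';   // Release looks like "11.4.2" on Lion.
  char* period_pos = strchr(buffer, '.');
  if (period_pos != NULL) *period_pos = '\0';
  int major = static_cast<int>(strtol(buffer, NULL, 10));
  printf("Darwin major version: %d\n", major);
  return 0;
}
```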
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+#ifdef V8_FAST_TLS_SUPPORTED
+  bool check_fast_tls = false;
+  if (tls_base_offset_initialized == 0) {
+    check_fast_tls = true;
+    InitializeTlsBaseOffset();
+  }
+#endif
+  pthread_key_t key;
+  int result = pthread_key_create(&key, NULL);
+  ASSERT_EQ(0, result);
+  USE(result);
+  LocalStorageKey local_key = PthreadKeyToLocalKey(key);
+#ifdef V8_FAST_TLS_SUPPORTED
+  // If we just initialized fast TLS support, make sure it works.
+  if (check_fast_tls) CheckFastTls(local_key);
+#endif
+  return local_key;
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+  int result = pthread_key_delete(pthread_key);
+  ASSERT_EQ(0, result);
+  USE(result);
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+  return pthread_getspecific(pthread_key);
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+  int result = pthread_setspecific(pthread_key, value);
+  ASSERT_EQ(0, result);
+  USE(result);
 }
 
 
diff --git a/src/platform-posix.h b/src/platform-posix.h
index bcc2b7e..6b73387 100644
--- a/src/platform-posix.h
+++ b/src/platform-posix.h
@@ -38,9 +38,6 @@
 namespace v8 {
 namespace internal {
 
-// Used by platform implementation files during OS::PostSetUp().
-void POSIXPostSetUp();
-
 // Used by platform implementation files during OS::DumpBacktrace()
 // and OS::StackWalk().
 template<int (*backtrace)(void**, int),
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 3c4df66..1ecca16 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -81,11 +81,6 @@
 namespace internal {
 
 
-// 0 is never a valid thread id on Solaris since the main thread is 1 and
-// subsequent have their ids incremented from there
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
 double ceiling(double x) {
   return ceil(x);
 }
@@ -94,11 +89,6 @@
 static Mutex* limit_mutex = NULL;
 
 
-void OS::PostSetUp() {
-  POSIXPostSetUp();
-}
-
-
 uint64_t OS::CpuFeaturesImpliedByPlatform() {
   return 0;  // Solaris runs on a lot of things.
 }
@@ -150,11 +140,6 @@
 }
 
 
-size_t OS::AllocateAlignment() {
-  return static_cast<size_t>(getpagesize());
-}
-
-
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
@@ -172,36 +157,6 @@
 }
 
 
-void OS::Free(void* address, const size_t size) {
-  // TODO(1240712): munmap has a return value which is ignored here.
-  int result = munmap(address, size);
-  USE(result);
-  ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
-  useconds_t ms = static_cast<useconds_t>(milliseconds);
-  usleep(1000 * ms);
-}
-
-
-int OS::NumberOfCores() {
-  return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
-void OS::Abort() {
-  // Redirect to std abort to signal abnormal program termination.
-  abort();
-}
-
-
-void OS::DebugBreak() {
-  asm("int $3");
-}
-
-
 void OS::DumpBacktrace() {
   // Currently unsupported.
 }
@@ -454,90 +409,6 @@
 }
 
 
-class Thread::PlatformData : public Malloced {
- public:
-  PlatformData() : thread_(kNoThread) {  }
-
-  pthread_t thread_;  // Thread handle for pthread.
-};
-
-
-Thread::Thread(const Options& options)
-    : data_(new PlatformData()),
-      stack_size_(options.stack_size()),
-      start_semaphore_(NULL) {
-  set_name(options.name());
-}
-
-
-Thread::~Thread() {
-  delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
-  Thread* thread = reinterpret_cast<Thread*>(arg);
-  // This is also initialized by the first argument to pthread_create() but we
-  // don't know which thread will run first (the original thread or the new
-  // one) so we initialize it here too.
-  thread->data()->thread_ = pthread_self();
-  ASSERT(thread->data()->thread_ != kNoThread);
-  thread->NotifyStartedAndRun();
-  return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
-  strncpy(name_, name, sizeof(name_));
-  name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
-  pthread_attr_t attr;
-  if (stack_size_ > 0) {
-    pthread_attr_init(&attr);
-    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
-  }
-  pthread_create(&data_->thread_, NULL, ThreadEntry, this);
-  ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
-  pthread_join(data_->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
-  pthread_key_t key;
-  int result = pthread_key_create(&key, NULL);
-  USE(result);
-  ASSERT(result == 0);
-  return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  int result = pthread_key_delete(pthread_key);
-  USE(result);
-  ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  pthread_setspecific(pthread_key, value);
-}
-
-
 class SolarisSemaphore : public Semaphore {
  public:
   explicit SolarisSemaphore(int count) {  sem_init(&sem_, 0, count); }
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index cc86724..8428303 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -655,7 +655,8 @@
   CodeEntry** entry = entries.start();
   memset(entry, 0, entries.length() * sizeof(*entry));
   if (sample.pc != NULL) {
-    if (sample.has_external_callback) {
+    if (sample.has_external_callback && sample.state == EXTERNAL &&
+        sample.top_frame_type == StackFrame::EXIT) {
       // Don't use PC when in external callback code, as it can point
       // inside callback's code, and we will erroneously report
       // that a callback calls itself.
diff --git a/src/runtime.cc b/src/runtime.cc
index c36d453..0c88cae 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -13345,20 +13345,6 @@
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerResume) {
-  SealHandleScope shs(isolate);
-  v8::V8::ResumeProfiler();
-  return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerPause) {
-  SealHandleScope shs(isolate);
-  v8::V8::PauseProfiler();
-  return isolate->heap()->undefined_value();
-}
-
-
 // Finds the script object from the script data. NOTE: This operation uses
 // heap traversal to find the function generated for the source position
 // for the requested break point. For lazily compiled functions several heap
diff --git a/src/runtime.h b/src/runtime.h
index a8c10d9..2fe9b0e 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -467,10 +467,7 @@
   F(TransitionElementsKind, 2, 1) \
   F(TransitionElementsSmiToDouble, 1, 1) \
   F(TransitionElementsDoubleToObject, 1, 1) \
-  F(HaveSameMap, 2, 1) \
-  /* profiler */ \
-  F(ProfilerResume, 0, 1) \
-  F(ProfilerPause, 0, 1)
+  F(HaveSameMap, 2, 1)
 
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
diff --git a/src/sampler.cc b/src/sampler.cc
index 222b318..d72ed1a 100644
--- a/src/sampler.cc
+++ b/src/sampler.cc
@@ -69,6 +69,7 @@
 #include "platform.h"
 #include "simulator.h"
 #include "v8threads.h"
+#include "vm-state-inl.h"
 
 
 #if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T)
@@ -621,9 +622,13 @@
     return;
   }
 
-  const Address callback = isolate->external_callback();
-  if (callback != NULL) {
-    external_callback = callback;
+  ExternalCallbackScope* scope = isolate->external_callback_scope();
+  Address handler = Isolate::handler(isolate->thread_local_top());
+  // If there is a handler on top of the external callback scope, then
+  // we have already entered JavaScript again and the external callback
+  // is not the top function.
+  if (scope && scope->scope_address() < handler) {
+    external_callback = scope->callback();
     has_external_callback = true;
   } else {
     // Sample potential return address value for frameless invocation of
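
The scope_address() < handler test relies on the stack growing downward on all supported platforms: the more recently entered construct lives at the lower address, so a callback scope below the handler was entered after it and is therefore the topmost frame. A toy illustration of that ordering (compile without optimization so the frames are not collapsed):

```cpp
#include <stdint.h>
#include <stdio.h>

uintptr_t DeeperFrame() {
  int local = 0;
  return reinterpret_cast<uintptr_t>(&local);
}

int main() {
  int outer_local = 0;
  uintptr_t outer = reinterpret_cast<uintptr_t>(&outer_local);
  uintptr_t inner = DeeperFrame();
  // On a downward-growing stack the deeper frame has the smaller address.
  printf("deeper frame below outer frame: %s\n",
         inner < outer ? "yes" : "no");
  return 0;
}
```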
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 436cd46..d554d0c 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -563,16 +563,15 @@
       Code::STORE_IC, Code::NORMAL, stub.GetExtraICState());
   if (!code.is_null()) return code;
 
-  if (is_constant) return stub.GetCode(isolate_);
-
   // Replace the placeholder cell and global object map with the actual global
   // cell and receiver map.
-  Handle<Map> cell_map(isolate_->heap()->global_property_cell_map());
   Handle<Map> meta_map(isolate_->heap()->meta_map());
   Handle<Object> receiver_map(receiver->map(), isolate_);
   code = stub.GetCodeCopyFromTemplate(isolate_);
   code->ReplaceNthObject(1, *meta_map, *receiver_map);
+  Handle<Map> cell_map(isolate_->heap()->global_property_cell_map());
   code->ReplaceNthObject(1, *cell_map, *cell);
+
   JSObject::UpdateMapCodeCache(receiver, name, code);
 
   return code;
diff --git a/src/types.cc b/src/types.cc
index 8bf9129..bea140e 100644
--- a/src/types.cc
+++ b/src/types.cc
@@ -206,6 +206,7 @@
       case DECLARED_ACCESSOR_INFO_TYPE:
       case EXECUTABLE_ACCESSOR_INFO_TYPE:
       case ACCESSOR_PAIR_TYPE:
+      case FIXED_ARRAY_TYPE:
         return kInternal;
       default:
         UNREACHABLE();
diff --git a/src/version.cc b/src/version.cc
index 41ac37f..49d7ff9 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     20
-#define BUILD_NUMBER      7
-#define PATCH_LEVEL       1
+#define BUILD_NUMBER      8
+#define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/vm-state-inl.h b/src/vm-state-inl.h
index 862c17e..658773e 100644
--- a/src/vm-state-inl.h
+++ b/src/vm-state-inl.h
@@ -29,7 +29,8 @@
 #define V8_VM_STATE_INL_H_
 
 #include "vm-state.h"
-#include "runtime-profiler.h"
+#include "log.h"
+#include "simulator.h"
 
 namespace v8 {
 namespace internal {
@@ -80,12 +81,26 @@
 
 
 ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
-    : isolate_(isolate), previous_callback_(isolate->external_callback()) {
-  isolate_->set_external_callback(callback);
+    : isolate_(isolate),
+      callback_(callback),
+      previous_scope_(isolate->external_callback_scope()) {
+#ifdef USE_SIMULATOR
+  int32_t sp = Simulator::current(isolate)->get_register(Simulator::sp);
+  scope_address_ = reinterpret_cast<Address>(static_cast<intptr_t>(sp));
+#endif
+  isolate_->set_external_callback_scope(this);
 }
 
 ExternalCallbackScope::~ExternalCallbackScope() {
-  isolate_->set_external_callback(previous_callback_);
+  isolate_->set_external_callback_scope(previous_scope_);
+}
+
+Address ExternalCallbackScope::scope_address() {
+#ifdef USE_SIMULATOR
+  return scope_address_;
+#else
+  return reinterpret_cast<Address>(this);
+#endif
 }
 
 
diff --git a/src/vm-state.h b/src/vm-state.h
index 765b570..f592bb9 100644
--- a/src/vm-state.h
+++ b/src/vm-state.h
@@ -50,9 +50,18 @@
  public:
   inline ExternalCallbackScope(Isolate* isolate, Address callback);
   inline ~ExternalCallbackScope();
+  Address callback() { return callback_; }
+  Address* callback_address() { return &callback_; }
+  ExternalCallbackScope* previous() { return previous_scope_; }
+  inline Address scope_address();
+
  private:
   Isolate* isolate_;
-  Address previous_callback_;
+  Address callback_;
+  ExternalCallbackScope* previous_scope_;
+#ifdef USE_SIMULATOR
+  Address scope_address_;
+#endif
 };
 
 } }  // namespace v8::internal
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index e090437..551a716 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -39,6 +39,16 @@
 namespace internal {
 
 
+void ToNumberStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { rax };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ = NULL;
+}
+
+
 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
@@ -296,26 +306,6 @@
 }
 
 
-void ToNumberStub::Generate(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in rax.
-  Label check_heap_number, call_builtin;
-  __ JumpIfNotSmi(rax, &check_heap_number, Label::kNear);
-  __ Ret();
-
-  __ bind(&check_heap_number);
-  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &call_builtin, Label::kNear);
-  __ Ret();
-
-  __ bind(&call_builtin);
-  __ pop(rcx);  // Pop return address.
-  __ push(rax);
-  __ push(rcx);  // Push return address.
-  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
-}
-
-
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // Create a new closure from the given function info in new
   // space. Set the context to the current context in rsi.
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index d7a73d7..a41cddf 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -610,6 +610,17 @@
   __ bind(&done);
 }
 
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+  SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+  SetFrameSlot(offset, value);
+}
+
+
 #undef __
 
 
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index c9b808c..0da9337 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -697,12 +697,6 @@
 }
 
 
-void LCodeGen::SoftDeoptimize(LEnvironment* environment) {
-  ASSERT(!info()->IsStub());
-  DeoptimizeIf(no_condition, environment, Deoptimizer::SOFT);
-}
-
-
 void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
   ZoneList<Handle<Map> > maps(1, zone());
   int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
@@ -4954,6 +4948,7 @@
 
 
 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+  if (instr->hydrogen()->CanOmitMapChecks()) return;
   LOperand* input = instr->value();
   ASSERT(input->IsRegister());
   Register reg = ToRegister(input);
@@ -5021,6 +5016,7 @@
 
 
 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+  if (instr->hydrogen()->CanOmitPrototypeChecks()) return;
   Register reg = ToRegister(instr->temp());
 
   ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
@@ -5028,11 +5024,9 @@
 
   ASSERT(prototypes->length() == maps->length());
 
-  if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
-    for (int i = 0; i < prototypes->length(); i++) {
-      __ LoadHeapObject(reg, prototypes->at(i));
-      DoCheckMapCommon(reg, maps->at(i), instr);
-    }
+  for (int i = 0; i < prototypes->length(); i++) {
+    __ LoadHeapObject(reg, prototypes->at(i));
+    DoCheckMapCommon(reg, maps->at(i), instr);
   }
 }
 
@@ -5075,6 +5069,23 @@
   }
 
   __ bind(deferred->exit());
+
+  if (instr->hydrogen()->MustPrefillWithFiller()) {
+    if (instr->size()->IsConstantOperand()) {
+      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+      __ movl(temp, Immediate((size / kPointerSize) - 1));
+    } else {
+      temp = ToRegister(instr->size());
+      __ sar(temp, Immediate(kPointerSizeLog2));
+      __ decl(temp);
+    }
+    Label loop;
+    __ bind(&loop);
+    __ Move(FieldOperand(result, temp, times_pointer_size, 0),
+        isolate()->factory()->one_pointer_filler_map());
+    __ decl(temp);
+    __ j(not_zero, &loop);
+  }
 }
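
The MustPrefillWithFiller loop above walks the folded allocation backwards, storing the one-pointer filler map into every slot except the first, presumably so the heap stays iterable if a GC runs before the folded objects are initialized (the gc() calls added to allocation-folding.js exercise exactly that). A host-side sketch of the loop's index arithmetic, with illustrative stand-in values:

```cpp
#include <stdio.h>

int main() {
  const int kPointerSize = 8;
  const int size = 4 * kPointerSize;    // Size of the folded allocation.
  long block[4] = { 0, 0, 0, 0 };
  const long kFillerMarker = 0xF111;    // Stand-in for the filler map word.
  // temp starts at (size / kPointerSize) - 1 and counts down to zero,
  // so slots 3, 2 and 1 receive the filler; slot 0 is set up separately.
  for (int temp = size / kPointerSize - 1; temp != 0; temp--) {
    block[temp] = kFillerMarker;
  }
  for (int i = 0; i < 4; i++) printf("slot %d: 0x%lx\n", i, block[i]);
  return 0;
}
```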
 
 
@@ -5347,11 +5358,15 @@
 
 
 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
-  if (instr->hydrogen_value()->IsSoftDeoptimize()) {
-    SoftDeoptimize(instr->environment());
-  } else {
-    DeoptimizeIf(no_condition, instr->environment());
+  Deoptimizer::BailoutType type = instr->hydrogen()->type();
+  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+  // needed return address), even though the implementation of LAZY and EAGER is
+  // now identical. When LAZY is eventually completely folded into EAGER, remove
+  // the special case below.
+  if (info()->IsStub() && type == Deoptimizer::EAGER) {
+    type = Deoptimizer::LAZY;
   }
+  DeoptimizeIf(no_condition, instr->environment(), type);
 }
 
 
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 0a43096..e3c567d 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -245,7 +245,6 @@
                     LEnvironment* environment,
                     Deoptimizer::BailoutType bailout_type);
   void DeoptimizeIf(Condition cc, LEnvironment* environment);
-  void SoftDeoptimize(LEnvironment* environment);
   void AddToTranslation(Translation* translation,
                         LOperand* op,
                         bool is_tagged,
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 2cec68b..25c22cc 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -710,11 +710,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
-  return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
 LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
   return AssignEnvironment(new(zone()) LDeoptimize);
 }
@@ -1949,8 +1944,10 @@
 
 
 LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
-  LUnallocated* temp = TempRegister();
+  LUnallocated* temp = NULL;
+  if (!instr->CanOmitPrototypeChecks()) temp = TempRegister();
   LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
+  if (instr->CanOmitPrototypeChecks()) return result;
   return AssignEnvironment(result);
 }
 
@@ -1962,8 +1959,10 @@
 
 
 LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* value = NULL;
+  if (!instr->CanOmitMapChecks()) value = UseRegisterAtStart(instr->value());
   LCheckMaps* result = new(zone()) LCheckMaps(value);
+  if (instr->CanOmitMapChecks()) return result;
   return AssignEnvironment(result);
 }
 
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 32ee0b9..92ddc89 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -40,12 +40,6 @@
 // Forward declarations.
 class LCodeGen;
 
-#define LITHIUM_ALL_INSTRUCTION_LIST(V)         \
-  V(ControlInstruction)                         \
-  V(Call)                                       \
-  LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
 #define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
   V(AccessArgumentsAt)                          \
   V(AddI)                                       \
@@ -72,6 +66,7 @@
   V(CheckFunction)                              \
   V(CheckInstanceType)                          \
   V(CheckMaps)                                  \
+  V(CheckMapValue)                              \
   V(CheckNonSmi)                                \
   V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
@@ -89,15 +84,18 @@
   V(ConstantS)                                  \
   V(ConstantT)                                  \
   V(Context)                                    \
+  V(DateField)                                  \
   V(DebugBreak)                                 \
   V(DeclareGlobals)                             \
   V(Deoptimize)                                 \
   V(DivI)                                       \
   V(DoubleToI)                                  \
   V(DoubleToSmi)                                \
+  V(Drop)                                       \
   V(DummyUse)                                   \
   V(ElementsKind)                               \
-  V(MapEnumLength)                              \
+  V(ForInCacheArray)                            \
+  V(ForInPrepareMap)                            \
   V(FunctionLiteral)                            \
   V(GetCachedArrayIndex)                        \
   V(GlobalObject)                               \
@@ -105,13 +103,13 @@
   V(Goto)                                       \
   V(HasCachedArrayIndexAndBranch)               \
   V(HasInstanceTypeAndBranch)                   \
+  V(InnerAllocatedObject)                       \
   V(InstanceOf)                                 \
   V(InstanceOfKnownGlobal)                      \
   V(InstanceSize)                               \
   V(InstructionGap)                             \
   V(Integer32ToDouble)                          \
   V(Integer32ToSmi)                             \
-  V(Uint32ToDouble)                             \
   V(InvokeFunction)                             \
   V(IsConstructCallAndBranch)                   \
   V(IsObjectAndBranch)                          \
@@ -124,6 +122,7 @@
   V(LinkObjectInList)                           \
   V(LoadContextSlot)                            \
   V(LoadExternalArrayPointer)                   \
+  V(LoadFieldByIndex)                           \
   V(LoadFunctionPrototype)                      \
   V(LoadGlobalCell)                             \
   V(LoadGlobalGeneric)                          \
@@ -132,6 +131,7 @@
   V(LoadNamedField)                             \
   V(LoadNamedFieldPolymorphic)                  \
   V(LoadNamedGeneric)                           \
+  V(MapEnumLength)                              \
   V(MathAbs)                                    \
   V(MathCos)                                    \
   V(MathExp)                                    \
@@ -184,16 +184,10 @@
   V(TrapAllocationMemento)                      \
   V(Typeof)                                     \
   V(TypeofIsAndBranch)                          \
+  V(Uint32ToDouble)                             \
   V(UnknownOSRValue)                            \
   V(ValueOf)                                    \
-  V(ForInPrepareMap)                            \
-  V(ForInCacheArray)                            \
-  V(CheckMapValue)                              \
-  V(LoadFieldByIndex)                           \
-  V(DateField)                                  \
-  V(WrapReceiver)                               \
-  V(Drop)                                       \
-  V(InnerAllocatedObject)
+  V(WrapReceiver)
 
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
@@ -433,6 +427,7 @@
 class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+  DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
 };
 
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index b3e1590..13d7dda 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -2253,7 +2253,8 @@
   ASSERT(kNotStringTag != 0);
   const int kFlatAsciiStringMask =
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+  const int kFlatAsciiStringTag =
+      kStringTag | kOneByteStringTag | kSeqStringTag;
 
   andl(scratch1, Immediate(kFlatAsciiStringMask));
   andl(scratch2, Immediate(kFlatAsciiStringMask));
@@ -2299,7 +2300,8 @@
   ASSERT(kNotStringTag != 0);
   const int kFlatAsciiStringMask =
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+  const int kFlatAsciiStringTag =
+      kStringTag | kOneByteStringTag | kSeqStringTag;
 
   andl(scratch1, Immediate(kFlatAsciiStringMask));
   andl(scratch2, Immediate(kFlatAsciiStringMask));
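
Composing kFlatAsciiStringTag from exactly the bits covered by kFlatAsciiStringMask (rather than reusing the full ASCII_STRING_TYPE constant) keeps the and-then-compare sequence self-consistent. A generic sketch of that check; the constant values here are illustrative stand-ins, not V8's actual instance-type encoding:

```cpp
#include <stdio.h>

const int kIsNotStringMask          = 0x80;  // Set for non-string types.
const int kStringRepresentationMask = 0x03;  // Seq/cons/external/sliced.
const int kStringEncodingMask       = 0x04;  // One-byte vs. two-byte.
const int kStringTag                = 0x00;
const int kSeqStringTag             = 0x00;
const int kOneByteStringTag         = 0x04;

bool IsFlatOneByteString(int instance_type) {
  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
  const int kFlatAsciiStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  // Keep only the bits the check cares about, then compare them all at once.
  return (instance_type & kFlatAsciiStringMask) == kFlatAsciiStringTag;
}

int main() {
  printf("seq one-byte: %d\n", IsFlatOneByteString(0x04));  // 1
  printf("seq two-byte: %d\n", IsFlatOneByteString(0x00));  // 0
  printf("non-string:   %d\n", IsFlatOneByteString(0x87));  // 0
  return 0;
}
```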
@@ -3836,6 +3838,7 @@
                               Label* gc_required,
                               AllocationFlags flags) {
   ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+  ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index fa671dc..3895e52 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -49,6 +49,7 @@
 #include "snapshot.h"
 #include "unicode-inl.h"
 #include "utils.h"
+#include "vm-state.h"
 
 static const bool kLogThreading = false;
 
@@ -848,8 +849,8 @@
   // VMState is set to EXTERNAL.
   if (isolate->cpu_profiler()->is_profiling()) {
     CHECK_EQ(i::EXTERNAL, isolate->current_vm_state());
-    CHECK(isolate->external_callback());
-    CHECK_EQ(callback, isolate->external_callback());
+    CHECK(isolate->external_callback_scope());
+    CHECK_EQ(callback, isolate->external_callback_scope()->callback());
   }
 }
 
diff --git a/test/cctest/test-cpu-profiler.cc b/test/cctest/test-cpu-profiler.cc
index eb72550..fe5e278 100644
--- a/test/cctest/test-cpu-profiler.cc
+++ b/test/cctest/test-cpu-profiler.cc
@@ -1081,3 +1081,231 @@
   v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
   cpu_profiler->DeleteAllCpuProfiles();
 }
+
+
+static const char* js_native_js_test_source = "function foo(iterations) {\n"
+"  var r = 0;\n"
+"  for (var i = 0; i < iterations; i++) { r += i; }\n"
+"  return r;\n"
+"}\n"
+"function bar(iterations) {\n"
+"  try { foo(iterations); } catch(e) {}\n"
+"}\n"
+"function start(duration) {\n"
+"  var start = Date.now();\n"
+"  while (Date.now() - start < duration) {\n"
+"    try {\n"
+"      CallJsFunction(bar, 10 * 1000);\n"
+"    } catch(e) {}\n"
+"  }\n"
+"}";
+
+static void CallJsFunction(const v8::FunctionCallbackInfo<v8::Value>& info) {
+  v8::Handle<v8::Function> function = info[0].As<v8::Function>();
+  v8::Handle<v8::Value> argv[] = { info[1] };
+  function->Call(info.This(), ARRAY_SIZE(argv), argv);
+}
+
+
+// [Top down]:
+//    58     0   (root) #0 1
+//     2     2    (program) #0 2
+//    56     1    start #16 3
+//    55     0      CallJsFunction #0 4
+//    55     1        bar #16 5
+//    54    54          foo #16 6
+TEST(JsNativeJsSample) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+
+  v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
+      CallJsFunction);
+  v8::Local<v8::Function> func = func_template->GetFunction();
+  func->SetName(v8::String::New("CallJsFunction"));
+  env->Global()->Set(v8::String::New("CallJsFunction"), func);
+
+  v8::Script::Compile(v8::String::New(js_native_js_test_source))->Run();
+  v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("start")));
+
+  int32_t duration_ms = 20;
+  v8::Handle<v8::Value> args[] = { v8::Integer::New(duration_ms) };
+  const v8::CpuProfile* profile =
+      RunProfiler(env, function, args, ARRAY_SIZE(args), 50);
+
+  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+  {
+    ScopedVector<v8::Handle<v8::String> > names(3);
+    names[0] = v8::String::New(ProfileGenerator::kGarbageCollectorEntryName);
+    names[1] = v8::String::New(ProfileGenerator::kProgramEntryName);
+    names[2] = v8::String::New("start");
+    CheckChildrenNames(root, names);
+  }
+
+  const v8::CpuProfileNode* startNode = GetChild(root, "start");
+  CHECK_EQ(1, startNode->GetChildrenCount());
+  const v8::CpuProfileNode* nativeFunctionNode =
+      GetChild(startNode, "CallJsFunction");
+
+  CHECK_EQ(1, nativeFunctionNode->GetChildrenCount());
+  const v8::CpuProfileNode* barNode = GetChild(nativeFunctionNode, "bar");
+
+  CHECK_EQ(1, barNode->GetChildrenCount());
+  GetChild(barNode, "foo");
+
+  v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
+  cpu_profiler->DeleteAllCpuProfiles();
+}
+
+
+static const char* js_native_js_runtime_js_test_source =
+"function foo(iterations) {\n"
+"  var r = 0;\n"
+"  for (var i = 0; i < iterations; i++) { r += i; }\n"
+"  return r;\n"
+"}\n"
+"var bound = foo.bind(this);\n"
+"function bar(iterations) {\n"
+"  try { bound(iterations); } catch(e) {}\n"
+"}\n"
+"function start(duration) {\n"
+"  var start = Date.now();\n"
+"  while (Date.now() - start < duration) {\n"
+"    try {\n"
+"      CallJsFunction(bar, 10 * 1000);\n"
+"    } catch(e) {}\n"
+"  }\n"
+"}";
+
+
+// [Top down]:
+//    57     0   (root) #0 1
+//    55     1    start #16 3
+//    54     0      CallJsFunction #0 4
+//    54     3        bar #16 5
+//    51    51          foo #16 6
+//     2     2    (program) #0 2
+TEST(JsNativeJsRuntimeJsSample) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+
+  v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
+      CallJsFunction);
+  v8::Local<v8::Function> func = func_template->GetFunction();
+  func->SetName(v8::String::New("CallJsFunction"));
+  env->Global()->Set(v8::String::New("CallJsFunction"), func);
+
+  v8::Script::Compile(v8::String::New(js_native_js_runtime_js_test_source))->
+      Run();
+  v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("start")));
+
+  int32_t duration_ms = 20;
+  v8::Handle<v8::Value> args[] = { v8::Integer::New(duration_ms) };
+  const v8::CpuProfile* profile =
+      RunProfiler(env, function, args, ARRAY_SIZE(args), 50);
+
+  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+  ScopedVector<v8::Handle<v8::String> > names(3);
+  names[0] = v8::String::New(ProfileGenerator::kGarbageCollectorEntryName);
+  names[1] = v8::String::New(ProfileGenerator::kProgramEntryName);
+  names[2] = v8::String::New("start");
+  CheckChildrenNames(root, names);
+
+  const v8::CpuProfileNode* startNode = GetChild(root, "start");
+  CHECK_EQ(1, startNode->GetChildrenCount());
+  const v8::CpuProfileNode* nativeFunctionNode =
+      GetChild(startNode, "CallJsFunction");
+
+  CHECK_EQ(1, nativeFunctionNode->GetChildrenCount());
+  const v8::CpuProfileNode* barNode = GetChild(nativeFunctionNode, "bar");
+
+  CHECK_EQ(1, barNode->GetChildrenCount());
+  GetChild(barNode, "foo");
+
+  v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
+  cpu_profiler->DeleteAllCpuProfiles();
+}
+
+
+static void CallJsFunction2(const v8::FunctionCallbackInfo<v8::Value>& info) {
+  CallJsFunction(info);
+}
+
+
+static const char* js_native1_js_native2_js_test_source =
+"function foo(iterations) {\n"
+"  var r = 0;\n"
+"  for (var i = 0; i < iterations; i++) { r += i; }\n"
+"  return r;\n"
+"}\n"
+"function bar(iterations) {\n"
+"  CallJsFunction2(foo, iterations);\n"
+"}\n"
+"function start(duration) {\n"
+"  var start = Date.now();\n"
+"  while (Date.now() - start < duration) {\n"
+"    try {\n"
+"      CallJsFunction1(bar, 10 * 1000);\n"
+"    } catch(e) {}\n"
+"  }\n"
+"}";
+
+
+// [Top down]:
+//    57     0   (root) #0 1
+//    55     1    start #16 3
+//    54     0      CallJsFunction1 #0 4
+//    54     0        bar #16 5
+//    54     0          CallJsFunction2 #0 6
+//    54    54            foo #16 7
+//     2     2    (program) #0 2
+TEST(JsNative1JsNative2JsSample) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+
+  v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
+      CallJsFunction);
+  v8::Local<v8::Function> func1 = func_template->GetFunction();
+  func1->SetName(v8::String::New("CallJsFunction1"));
+  env->Global()->Set(v8::String::New("CallJsFunction1"), func1);
+
+  v8::Local<v8::Function> func2 = v8::FunctionTemplate::New(
+      CallJsFunction2)->GetFunction();
+  func2->SetName(v8::String::New("CallJsFunction2"));
+  env->Global()->Set(v8::String::New("CallJsFunction2"), func2);
+
+  v8::Script::Compile(v8::String::New(js_native1_js_native2_js_test_source))->
+      Run();
+  v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("start")));
+
+  int32_t duration_ms = 20;
+  v8::Handle<v8::Value> args[] = { v8::Integer::New(duration_ms) };
+  const v8::CpuProfile* profile =
+      RunProfiler(env, function, args, ARRAY_SIZE(args), 50);
+
+  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+  ScopedVector<v8::Handle<v8::String> > names(3);
+  names[0] = v8::String::New(ProfileGenerator::kGarbageCollectorEntryName);
+  names[1] = v8::String::New(ProfileGenerator::kProgramEntryName);
+  names[2] = v8::String::New("start");
+  CheckChildrenNames(root, names);
+
+  const v8::CpuProfileNode* startNode = GetChild(root, "start");
+  CHECK_EQ(1, startNode->GetChildrenCount());
+  const v8::CpuProfileNode* nativeNode1 =
+      GetChild(startNode, "CallJsFunction1");
+
+  CHECK_EQ(1, nativeNode1->GetChildrenCount());
+  const v8::CpuProfileNode* barNode = GetChild(nativeNode1, "bar");
+
+  CHECK_EQ(1, barNode->GetChildrenCount());
+  const v8::CpuProfileNode* nativeNode2 = GetChild(barNode, "CallJsFunction2");
+
+  CHECK_EQ(1, nativeNode2->GetChildrenCount());
+  GetChild(nativeNode2, "foo");
+
+  v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
+  cpu_profiler->DeleteAllCpuProfiles();
+}
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index d2b9156..a55238b 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -984,7 +984,7 @@
   // just enough room to allocate JSObject and thus fill the newspace.
 
   int allocation_amount = Min(FixedArray::kMaxSize,
-                              HEAP->MaxObjectSizeInNewSpace());
+                              Page::kMaxNonCodeHeapObjectSize);
   int allocation_len = LenFromSize(allocation_amount);
   NewSpace* new_space = HEAP->new_space();
   Address* top_addr = new_space->allocation_top_address();
diff --git a/test/cctest/test-log.cc b/test/cctest/test-log.cc
index 81cb001..bf1151e 100644
--- a/test/cctest/test-log.cc
+++ b/test/cctest/test-log.cc
@@ -27,6 +27,7 @@
 //
 // Tests of logging functions from log.h
 
+#define V8_DISABLE_DEPRECATIONS 1
 #ifdef __linux__
 #include <pthread.h>
 #include <signal.h>
@@ -43,6 +44,7 @@
 #include "v8utils.h"
 #include "cctest.h"
 #include "vm-state-inl.h"
+#undef V8_DISABLE_DEPRECATIONS
 
 using v8::internal::Address;
 using v8::internal::EmbeddedVector;
diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc
index 626df16..0d8c00d 100644
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -119,10 +119,8 @@
   HEAP->CollectGarbage(OLD_POINTER_SPACE);
 
   // Allocate a big Fixed array in the new space.
-  int max_size =
-      Min(Page::kMaxNonCodeHeapObjectSize, HEAP->MaxObjectSizeInNewSpace());
-
-  int length = (max_size - FixedArray::kHeaderSize) / (2*kPointerSize);
+  int length = (Page::kMaxNonCodeHeapObjectSize -
+      FixedArray::kHeaderSize) / (2 * kPointerSize);
   Object* obj = i::Isolate::Current()->heap()->AllocateFixedArray(length)->
       ToObjectChecked();
 
diff --git a/test/mjsunit/allocation-folding.js b/test/mjsunit/allocation-folding.js
index a730bf1..fe5fa6d 100644
--- a/test/mjsunit/allocation-folding.js
+++ b/test/mjsunit/allocation-folding.js
@@ -25,7 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --nouse-osr
+// Flags: --allow-natives-syntax --nouse-osr --expose-gc
+
+// Test loop barrier when folding allocations.
+
 function f() {
   var elem1 = [1,2,3];
   for (var i=0; i < 100000; i++) {
@@ -39,8 +42,38 @@
 %OptimizeFunctionOnNextCall(f);
 var result = f();
 
-for (var i=0; i < 100000; i++) {
-  var bar = [1];
-}
+gc();
 
 assertEquals(result[2], 3);
+
+// Test allocation folding of doubles.
+
+function doubles() {
+  var elem1 = [1.1, 1.2];
+  var elem2 = [2.1, 2.2];
+  return elem2;
+}
+
+doubles(); doubles(); doubles();
+%OptimizeFunctionOnNextCall(doubles);
+var result = doubles();
+
+gc();
+
+assertEquals(result[1], 2.2);
+
+// Test allocation folding of doubles into non-doubles.
+
+function doubles_int() {
+  var elem1 = [2, 3];
+  var elem2 = [2.1, 3.1];
+  return elem2;
+}
+
+doubles_int(); doubles_int(); doubles_int();
+%OptimizeFunctionOnNextCall(doubles_int);
+var result = doubles_int();
+
+gc();
+
+assertEquals(result[1], 3.1);
diff --git a/test/mjsunit/harmony/collections.js b/test/mjsunit/harmony/collections.js
index 67f91a8..3e87e6b 100644
--- a/test/mjsunit/harmony/collections.js
+++ b/test/mjsunit/harmony/collections.js
@@ -288,6 +288,22 @@
 assertEquals("WeakSet", WeakSet.name);
 
 
+// Test prototype property of Set, Map, WeakMap and WeakSet.
+function TestPrototype(C) {
+  assertTrue(C.prototype instanceof Object);
+  assertEquals({
+    value: {},
+    writable: true,  // TODO(2793): This should be non-writable.
+    enumerable: false,
+    configurable: false
+  }, Object.getOwnPropertyDescriptor(C, "prototype"));
+}
+TestPrototype(Set);
+TestPrototype(Map);
+TestPrototype(WeakMap);
+TestPrototype(WeakSet);
+
+
 // Test constructor property of the Set, Map, WeakMap and WeakSet prototype.
 function TestConstructor(C) {
   assertFalse(C === Object.prototype.constructor);
@@ -301,6 +317,7 @@
 TestConstructor(WeakSet);
 
 
+// Test the Set, Map, WeakMap and WeakSet global properties themselves.
 function TestDescriptor(global, C) {
   assertEquals({
     value: C,
diff --git a/test/mjsunit/omit-constant-mapcheck.js b/test/mjsunit/omit-constant-mapcheck.js
new file mode 100644
index 0000000..ae6308f
--- /dev/null
+++ b/test/mjsunit/omit-constant-mapcheck.js
@@ -0,0 +1,70 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var g1 = { a:1 }
+
+function load() {
+  return g1.a;
+}
+
+assertEquals(1, load());
+assertEquals(1, load());
+%OptimizeFunctionOnNextCall(load);
+assertEquals(1, load());
+delete g1.a;
+assertEquals(undefined, load());
+
+var g2 = { a:2 }
+
+function load2() {
+  return g2.a;
+}
+
+assertEquals(2, load2());
+assertEquals(2, load2());
+%OptimizeFunctionOnNextCall(load2);
+assertEquals(2, load2());
+g2.b = 10;
+g2.a = 5;
+assertEquals(5, load2());
+
+var g3 = { a:2, b:9, c:1 }
+
+function store(v) {
+  g3.a = v;
+  return g3.a;
+}
+
+assertEquals(5, store(5));
+assertEquals(8, store(8));
+%OptimizeFunctionOnNextCall(store);
+assertEquals(10, store(10));
+delete g3.c;
+store(7);
+assertEquals({a:7, b:9}, g3);