Version 3.17.0

Enabled parallel sweeping by default.

Don't try to unlink instructions twice during GVN. (Chromium issue 175141)

Fixed code flusher disabling while marking incrementally. (Chromium issue 173458, 168582)

Don't use TLS for space iterators. (issue 2531)

Added a new GetHeapStatistics API entry point on Isolate and deprecated the old one.
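
A minimal usage sketch of the new per-isolate entry point (a sketch only,
assuming an initialized v8::Isolate* named isolate and the usual
HeapStatistics getters from include/v8.h; the deprecated static
v8::V8::GetHeapStatistics() now forwards to the current isolate, see api.cc
below):

  v8::HeapStatistics stats;
  isolate->GetHeapStatistics(&stats);
  printf("heap: %lu bytes used / %lu bytes committed\n",
         static_cast<unsigned long>(stats.used_heap_size()),
         static_cast<unsigned long>(stats.total_heap_size()));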

Fixed DoubleStackSlot-to-DoubleStackSlot moves on ia32. Unified platform-independent code. (Chromium issue 173907)

Added --trace-array-abuse to help find OOB accesses.
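
With the flag enabled, out-of-bounds accesses are reported with the message
format of CheckArrayAbuse() in elements.cc below, e.g. (trailing frame
description abbreviated):

  [OOB array elements read (array length = 3, element accessed = 10) in ...]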

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@13663 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/accessors.cc b/src/accessors.cc
index efcaf8f..07ff720 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -880,7 +880,7 @@
     int index,
     PropertyAttributes attributes) {
   Factory* factory = name->GetIsolate()->factory();
-  Handle<AccessorInfo> info = factory->NewAccessorInfo();
+  Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo();
   info->set_property_attributes(attributes);
   info->set_all_can_read(true);
   info->set_all_can_write(true);
diff --git a/src/api.cc b/src/api.cc
index d3d472d..305fbfb 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -1106,7 +1106,8 @@
       v8::AccessControl settings,
       v8::PropertyAttribute attributes,
       v8::Handle<AccessorSignature> signature) {
-  i::Handle<i::AccessorInfo> obj = FACTORY->NewAccessorInfo();
+  i::Handle<i::ExecutableAccessorInfo> obj =
+      FACTORY->NewExecutableAccessorInfo();
   SET_FIELD_WRAPPED(obj, set_getter, getter);
   SET_FIELD_WRAPPED(obj, set_setter, setter);
   if (data.IsEmpty()) data = v8::Undefined();
@@ -4407,23 +4408,18 @@
 
 
 void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
-  if (!i::Isolate::Current()->IsInitialized()) {
+  i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+  if (isolate == NULL || !isolate->IsInitialized()) {
     // Isolate is uninitialized, thus the heap is not configured yet.
-    heap_statistics->set_total_heap_size(0);
-    heap_statistics->set_total_heap_size_executable(0);
-    heap_statistics->set_total_physical_size(0);
-    heap_statistics->set_used_heap_size(0);
-    heap_statistics->set_heap_size_limit(0);
+    heap_statistics->total_heap_size_ = 0;
+    heap_statistics->total_heap_size_executable_ = 0;
+    heap_statistics->total_physical_size_ = 0;
+    heap_statistics->used_heap_size_ = 0;
+    heap_statistics->heap_size_limit_ = 0;
     return;
   }
-
-  i::Heap* heap = i::Isolate::Current()->heap();
-  heap_statistics->set_total_heap_size(heap->CommittedMemory());
-  heap_statistics->set_total_heap_size_executable(
-      heap->CommittedMemoryExecutable());
-  heap_statistics->set_total_physical_size(heap->CommittedPhysicalMemory());
-  heap_statistics->set_used_heap_size(heap->SizeOfObjects());
-  heap_statistics->set_heap_size_limit(heap->MaxReserved());
+  Isolate* ext_isolate = reinterpret_cast<Isolate*>(isolate);
+  return ext_isolate->GetHeapStatistics(heap_statistics);
 }
 
 
@@ -5609,6 +5605,18 @@
 }
 
 
+void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  i::Heap* heap = isolate->heap();
+  heap_statistics->total_heap_size_ = heap->CommittedMemory();
+  heap_statistics->total_heap_size_executable_ =
+      heap->CommittedMemoryExecutable();
+  heap_statistics->total_physical_size_ = heap->CommittedPhysicalMemory();
+  heap_statistics->used_heap_size_ = heap->SizeOfObjects();
+  heap_statistics->heap_size_limit_ = heap->MaxReserved();
+}
+
+
 String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
     : str_(NULL), length_(0) {
   i::Isolate* isolate = i::Isolate::Current();
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index dacdd5a..208ca21 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1685,6 +1685,11 @@
 }
 
 
+LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
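+  // A numeric constraint is purely informative; no code is generated for it.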
+  return NULL;
+}
+
+
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
   LOperand* value = UseRegisterOrConstantAtStart(instr->index());
   LOperand* length = UseRegister(instr->length());
@@ -1824,6 +1829,12 @@
 }
 
 
+LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
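+  // A surviving check is in tagged representation (the int32 case is
+  // canonicalized away), so it reduces to a Smi check; reuse LCheckSmi.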
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
 LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
   return AssignEnvironment(new(zone()) LCheckFunction(value));
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 45ebfae..4564d26 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -821,7 +821,7 @@
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
-
+  ASSERT(info()->IsOptimizing() || info()->IsStub());
   Deoptimizer::BailoutType bailout_type = info()->IsStub()
       ? Deoptimizer::LAZY
       : Deoptimizer::EAGER;
@@ -832,18 +832,23 @@
   }
 
   ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on ARM.
-
   if (FLAG_deopt_every_n_times == 1 && info_->opt_count() == id) {
     __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
     return;
   }
 
-  if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
+  if (FLAG_trap_on_deopt) {
+    __ stop("trap_on_deopt", cc);
+  }
 
-  bool needs_lazy_deopt = info()->IsStub();
   ASSERT(info()->IsStub() || frame_is_built_);
-  if (cc == al && !needs_lazy_deopt) {
-    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+  bool needs_lazy_deopt = info()->IsStub();
+  if (cc == al && frame_is_built_) {
+    if (needs_lazy_deopt) {
+      __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+    } else {
+      __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+    }
   } else {
     // We often have several deopts to the same entry; reuse the last
     // jump entry if this is the case.
@@ -4310,28 +4315,9 @@
 }
 
 
-void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
-                                      HValue* value,
-                                      LOperand* operand) {
-  if (value->representation().IsTagged() && !value->type().IsSmi()) {
-    if (operand->IsRegister()) {
-      __ tst(ToRegister(operand), Operand(kSmiTagMask));
-    } else {
-      __ mov(ip, ToOperand(operand));
-      __ tst(ip, Operand(kSmiTagMask));
-    }
-    DeoptimizeIf(ne, environment);
-  }
-}
-
-
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
-  DeoptIfTaggedButNotSmi(instr->environment(),
-                         instr->hydrogen()->length(),
-                         instr->length());
-  DeoptIfTaggedButNotSmi(instr->environment(),
-                         instr->hydrogen()->index(),
-                         instr->index());
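+  // The informative-definitions pass may have proved the index to be within
+  // bounds, in which case no check needs to be emitted.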
+  if (instr->hydrogen()->skip_check()) return;
+
   if (instr->index()->IsConstantOperand()) {
     int constant_index =
         ToInteger32(LConstantOperand::cast(instr->index()));
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index f7d6bad..2c3f01f 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -326,10 +326,6 @@
                         LEnvironment* env,
                         NumberUntagDMode mode);
 
-  void DeoptIfTaggedButNotSmi(LEnvironment* environment,
-                              HValue* value,
-                              LOperand* operand);
-
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to
   // true and false label should be made, to optimize fallthrough.
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 37e6be2..50bd22f 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1210,14 +1210,15 @@
 }
 
 
-void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
-                                                  Register name_reg,
-                                                  Register scratch1,
-                                                  Register scratch2,
-                                                  Register scratch3,
-                                                  Handle<AccessorInfo> callback,
-                                                  Handle<String> name,
-                                                  Label* miss) {
+void StubCompiler::GenerateDictionaryLoadCallback(
+    Register receiver,
+    Register name_reg,
+    Register scratch1,
+    Register scratch2,
+    Register scratch3,
+    Handle<ExecutableAccessorInfo> callback,
+    Handle<String> name,
+    Label* miss) {
   ASSERT(!receiver.is(scratch1));
   ASSERT(!receiver.is(scratch2));
   ASSERT(!receiver.is(scratch3));
@@ -1249,17 +1250,18 @@
 }
 
 
-void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
-                                        Handle<JSObject> holder,
-                                        Register receiver,
-                                        Register name_reg,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Register scratch3,
-                                        Register scratch4,
-                                        Handle<AccessorInfo> callback,
-                                        Handle<String> name,
-                                        Label* miss) {
+void StubCompiler::GenerateLoadCallback(
+    Handle<JSObject> object,
+    Handle<JSObject> holder,
+    Register receiver,
+    Register name_reg,
+    Register scratch1,
+    Register scratch2,
+    Register scratch3,
+    Register scratch4,
+    Handle<ExecutableAccessorInfo> callback,
+    Handle<String> name,
+    Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
@@ -1278,7 +1280,8 @@
   __ mov(scratch2, sp);  // scratch2 = AccessorInfo::args_
   if (heap()->InNewSpace(callback->data())) {
     __ Move(scratch3, callback);
-    __ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
+    __ ldr(scratch3,
+           FieldMemOperand(scratch3, ExecutableAccessorInfo::kDataOffset));
   } else {
     __ Move(scratch3, Handle<Object>(callback->data()));
   }
@@ -1331,8 +1334,9 @@
     if (lookup->IsField()) {
       compile_followup_inline = true;
     } else if (lookup->type() == CALLBACKS &&
-               lookup->GetCallbackObject()->IsAccessorInfo()) {
-      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+               lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
+      ExecutableAccessorInfo* callback =
+          ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
       compile_followup_inline = callback->getter() != NULL &&
           callback->IsCompatibleReceiver(*object);
     }
@@ -1413,8 +1417,8 @@
       // We found CALLBACKS property in prototype chain of interceptor's
       // holder.
       ASSERT(lookup->type() == CALLBACKS);
-      Handle<AccessorInfo> callback(
-          AccessorInfo::cast(lookup->GetCallbackObject()));
+      Handle<ExecutableAccessorInfo> callback(
+          ExecutableAccessorInfo::cast(lookup->GetCallbackObject()));
       ASSERT(callback->getter() != NULL);
 
       // Tail call to runtime.
@@ -1430,7 +1434,7 @@
         __ push(holder_reg);
       }
       __ ldr(scratch3,
-             FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
+             FieldMemOperand(scratch2, ExecutableAccessorInfo::kDataOffset));
       __ mov(scratch1, Operand(ExternalReference::isolate_address()));
       __ Push(scratch3, scratch1, scratch2, name_reg);
 
@@ -2698,7 +2702,7 @@
     Handle<String> name,
     Handle<JSObject> receiver,
     Handle<JSObject> holder,
-    Handle<AccessorInfo> callback) {
+    Handle<ExecutableAccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : receiver
diff --git a/src/builtins.cc b/src/builtins.cc
index 16e672c..188528b 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -525,12 +525,9 @@
   }
   if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
 
-  if (FLAG_harmony_observation &&
-      JSObject::cast(receiver)->map()->is_observed()) {
-    return CallJsBuiltin(isolate, "ArrayPush", args);
-  }
-
   JSArray* array = JSArray::cast(receiver);
+  ASSERT(!array->map()->is_observed());
+
   ElementsKind kind = array->GetElementsKind();
 
   if (IsFastSmiOrObjectElementsKind(kind)) {
@@ -642,10 +639,7 @@
   if (!maybe_elms->To(&elms_obj)) return maybe_elms;
 
   JSArray* array = JSArray::cast(receiver);
-
-  if (FLAG_harmony_observation && array->map()->is_observed()) {
-    return CallJsBuiltin(isolate, "ArrayPop", args);
-  }
+  ASSERT(!array->map()->is_observed());
 
   int len = Smi::cast(array->length())->value();
   if (len == 0) return heap->undefined_value();
@@ -680,10 +674,7 @@
     return CallJsBuiltin(isolate, "ArrayShift", args);
   }
   JSArray* array = JSArray::cast(receiver);
-
-  if (FLAG_harmony_observation && array->map()->is_observed()) {
-    return CallJsBuiltin(isolate, "ArrayShift", args);
-  }
+  ASSERT(!array->map()->is_observed());
 
   int len = Smi::cast(array->length())->value();
   if (len == 0) return heap->undefined_value();
@@ -734,15 +725,12 @@
     return CallJsBuiltin(isolate, "ArrayUnshift", args);
   }
   JSArray* array = JSArray::cast(receiver);
+  ASSERT(!array->map()->is_observed());
   if (!array->HasFastSmiOrObjectElements()) {
     return CallJsBuiltin(isolate, "ArrayUnshift", args);
   }
   FixedArray* elms = FixedArray::cast(elms_obj);
 
-  if (FLAG_harmony_observation && array->map()->is_observed()) {
-    return CallJsBuiltin(isolate, "ArrayUnshift", args);
-  }
-
   int len = Smi::cast(array->length())->value();
   int to_add = args.length() - 1;
   int new_length = len + to_add;
@@ -938,10 +926,7 @@
     return CallJsBuiltin(isolate, "ArraySplice", args);
   }
   JSArray* array = JSArray::cast(receiver);
-
-  if (FLAG_harmony_observation && array->map()->is_observed()) {
-    return CallJsBuiltin(isolate, "ArraySplice", args);
-  }
+  ASSERT(!array->map()->is_observed());
 
   int len = Smi::cast(array->length())->value();
 
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index f837045..2024d08 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -76,8 +76,9 @@
 
 bool CodeStubGraphBuilderBase::BuildGraph() {
   if (FLAG_trace_hydrogen) {
+    const char* name = CodeStub::MajorName(stub()->MajorKey(), false);
     PrintF("-----------------------------------------------------------\n");
-    PrintF("Compiling stub using hydrogen\n");
+    PrintF("Compiling stub %s using hydrogen\n", name);
     HTracer::Instance()->TraceCompilation(&info_);
   }
   HBasicBlock* next_block = graph()->CreateBasicBlock();
@@ -177,7 +178,11 @@
   HConstant* max_alloc_size =
       new(zone) HConstant(kMinFreeNewSpaceAfterGC, Representation::Integer32());
   AddInstruction(max_alloc_size);
-  AddInstruction(new(zone) HBoundsCheck(array_length, max_alloc_size));
+  // Since we're forcing Integer32 representation for this HBoundsCheck,
+  // there's no need to Smi-check the index.
+  AddInstruction(
+      new(zone) HBoundsCheck(array_length, max_alloc_size,
+                             DONT_ALLOW_SMI_KEY, Representation::Integer32()));
 
   current_block()->UpdateEnvironment(new(zone) HEnvironment(zone));
 
diff --git a/src/d8.cc b/src/d8.cc
index 6d63ef1..ea723dc 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -1378,6 +1378,7 @@
            "-------------+\n");
     delete [] counters;
   }
+  delete context_mutex_;
   delete counters_file_;
   delete counter_map_;
 #endif  // V8_SHARED
@@ -1497,7 +1498,6 @@
   Context::Scope context_scope(evaluation_context_);
   HandleScope outer_scope;
   Handle<String> name = String::New("(d8)");
-  DumbLineEditor dumb_line_editor(isolate);
   LineEditor* console = LineEditor::Get();
   printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
   console->Open();
@@ -1902,6 +1902,7 @@
   if (!SetOptions(argc, argv)) return 1;
   int result = 0;
   Isolate* isolate = Isolate::GetCurrent();
+  DumbLineEditor dumb_line_editor(isolate);
   {
     Initialize(isolate);
     Symbols symbols(isolate);
diff --git a/src/debug.cc b/src/debug.cc
index 866839d..05050d1 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -2005,14 +2005,15 @@
     {
       // We are going to iterate heap to find all functions without
       // debug break slots.
-      isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
-                                          "preparing for breakpoints");
+      Heap* heap = isolate_->heap();
+      heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                              "preparing for breakpoints");
 
       // Ensure no GC in this scope as we are going to use gc_metadata
       // field in the Code object to mark active functions.
       AssertNoAllocation no_allocation;
 
-      Object* active_code_marker = isolate_->heap()->the_hole_value();
+      Object* active_code_marker = heap->the_hole_value();
 
       CollectActiveFunctionsFromThread(isolate_,
                                        isolate_->thread_local_top(),
@@ -2026,7 +2027,7 @@
       // Scan the heap for all non-optimized functions which have no
       // debug break slots and are not active or inlined into an active
       // function and mark them for lazy compilation.
-      HeapIterator iterator;
+      HeapIterator iterator(heap);
       HeapObject* obj = NULL;
       while (((obj = iterator.next()) != NULL)) {
         if (obj->IsJSFunction()) {
@@ -2121,11 +2122,12 @@
   int target_start_position = RelocInfo::kNoPosition;
   Handle<JSFunction> target_function;
   Handle<SharedFunctionInfo> target;
+  Heap* heap = isolate_->heap();
   while (!done) {
     { // Extra scope for iterator and no-allocation.
-      isolate_->heap()->EnsureHeapIsIterable();
+      heap->EnsureHeapIsIterable();
       AssertNoAllocation no_alloc_during_heap_iteration;
-      HeapIterator iterator;
+      HeapIterator iterator(heap);
       for (HeapObject* obj = iterator.next();
            obj != NULL; obj = iterator.next()) {
         bool found_next_candidate = false;
@@ -2185,9 +2187,7 @@
       }  // End for loop.
     }  // End no-allocation scope.
 
-    if (target.is_null()) {
-      return isolate_->heap()->undefined_value();
-    }
+    if (target.is_null()) return heap->undefined_value();
 
     // There will be at least one break point when we are done.
     has_break_points_ = true;
@@ -2461,7 +2461,7 @@
 
   // Scan heap for Script objects.
   int count = 0;
-  HeapIterator iterator;
+  HeapIterator iterator(heap);
   AssertNoAllocation no_allocation;
 
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
diff --git a/src/elements.cc b/src/elements.cc
index 5b454e5..d0b6c3b 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -30,7 +30,7 @@
 #include "objects.h"
 #include "elements.h"
 #include "utils.h"
-
+#include "v8conversions.h"
 
 // Each concrete ElementsAccessor can handle exactly one ElementsKind,
 // several abstract ElementsAccessor classes are used to allow sharing
@@ -483,6 +483,66 @@
 }
 
 
+static void TraceTopFrame() {
+  StackFrameIterator it;
+  if (it.done()) {
+    PrintF("unknown location (no JavaScript frames present)");
+    return;
+  }
+  StackFrame* raw_frame = it.frame();
+  if (raw_frame->is_internal()) {
+    Isolate* isolate = Isolate::Current();
+    Code* apply_builtin = isolate->builtins()->builtin(
+        Builtins::kFunctionApply);
+    if (raw_frame->unchecked_code() == apply_builtin) {
+      PrintF("apply from ");
+      it.Advance();
+      raw_frame = it.frame();
+    }
+  }
+  JavaScriptFrame::PrintTop(stdout, false, true);
+}
+
+
+void CheckArrayAbuse(JSObject* obj, const char* op, uint32_t key,
+                     bool allow_appending) {
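+  // Compares the key against the JSArray length (or the backing store length
+  // for other objects); appends may legally address one element past it.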
+  Object* raw_length = NULL;
+  const char* elements_type = "array";
+  if (obj->IsJSArray()) {
+    JSArray* array = JSArray::cast(obj);
+    raw_length = array->length();
+  } else {
+    raw_length = Smi::FromInt(obj->elements()->length());
+    elements_type = "object";
+  }
+
+  if (raw_length->IsNumber()) {
+    double n = raw_length->Number();
+    if (FastI2D(FastD2UI(n)) == n) {
+      int32_t int32_length = DoubleToInt32(n);
+      uint32_t compare_length = static_cast<uint32_t>(int32_length);
+      if (allow_appending) compare_length++;
+      if (key >= compare_length) {
+        PrintF("[OOB %s %s (%s length = %d, element accessed = %d) in ",
+               elements_type, op, elements_type,
+               static_cast<int>(int32_length),
+               static_cast<int>(key));
+        TraceTopFrame();
+        PrintF("]\n");
+      }
+    } else {
+      PrintF("[%s elements length not integer value in ", elements_type);
+      TraceTopFrame();
+      PrintF("]\n");
+    }
+  } else {
+    PrintF("[%s elements length not a number in ", elements_type);
+    TraceTopFrame();
+    PrintF("]\n");
+  }
+}
+
+
 // Base class for element handler implementations. Contains the
 // common logic for objects with different ElementsKinds.
 // Subclasses must specialize method for which the element
@@ -570,6 +630,17 @@
     if (backing_store == NULL) {
       backing_store = holder->elements();
     }
+
+    if (!IsExternalArrayElementsKind(ElementsTraits::Kind) &&
+        FLAG_trace_js_array_abuse) {
+      CheckArrayAbuse(holder, "elements read", key);
+    }
+
+    if (IsExternalArrayElementsKind(ElementsTraits::Kind) &&
+        FLAG_trace_external_array_abuse) {
+      CheckArrayAbuse(holder, "external elements read", key);
+    }
+
     return ElementsAccessorSubclass::GetImpl(
         receiver, holder, key, backing_store);
   }
diff --git a/src/elements.h b/src/elements.h
index e25076b..e31ceb4 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -197,6 +197,9 @@
   DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
 };
 
+void CheckArrayAbuse(JSObject* obj, const char* op, uint32_t key,
+                     bool allow_appending = false);
+
 } }  // namespace v8::internal
 
 #endif  // V8_ELEMENTS_H_
diff --git a/src/factory.cc b/src/factory.cc
index 25f197b..1a275e9 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -363,9 +363,19 @@
 }
 
 
-Handle<AccessorInfo> Factory::NewAccessorInfo() {
-  Handle<AccessorInfo> info =
-      Handle<AccessorInfo>::cast(NewStruct(ACCESSOR_INFO_TYPE));
+Handle<DeclaredAccessorInfo> Factory::NewDeclaredAccessorInfo() {
+  Handle<DeclaredAccessorInfo> info =
+      Handle<DeclaredAccessorInfo>::cast(
+          NewStruct(DECLARED_ACCESSOR_INFO_TYPE));
+  info->set_flag(0);  // Must clear the flag; it was initialized as undefined.
+  return info;
+}
+
+
+Handle<ExecutableAccessorInfo> Factory::NewExecutableAccessorInfo() {
+  Handle<ExecutableAccessorInfo> info =
+      Handle<ExecutableAccessorInfo>::cast(
+          NewStruct(EXECUTABLE_ACCESSOR_INFO_TYPE));
   info->set_flag(0);  // Must clear the flag; it was initialized as undefined.
   return info;
 }
diff --git a/src/factory.h b/src/factory.h
index 522639d..6989396 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -202,7 +202,9 @@
   // the old generation).
   Handle<Struct> NewStruct(InstanceType type);
 
-  Handle<AccessorInfo> NewAccessorInfo();
+  Handle<DeclaredAccessorInfo> NewDeclaredAccessorInfo();
+
+  Handle<ExecutableAccessorInfo> NewExecutableAccessorInfo();
 
   Handle<Script> NewScript(Handle<String> source);
 
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 4a0d3c6..fa38773 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -363,6 +363,14 @@
 
 // debug.cc
 DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
+DEFINE_bool(trace_js_array_abuse, false,
+            "trace out-of-bounds accesses to JS arrays")
+DEFINE_bool(trace_external_array_abuse, false,
+            "trace out-of-bounds-accesses to external arrays")
+DEFINE_bool(trace_array_abuse, false,
+            "trace out-of-bounds accesses to all arrays")
+DEFINE_implication(trace_array_abuse, trace_js_array_abuse)
+DEFINE_implication(trace_array_abuse, trace_external_array_abuse)
 DEFINE_bool(debugger_auto_break, true,
             "automatically set the debug break flag when debugger commands are "
             "in the queue")
@@ -422,7 +430,7 @@
             "trace progress of the incremental marking")
 DEFINE_bool(track_gc_object_stats, false,
             "track object counts and memory usage")
-DEFINE_bool(parallel_sweeping, false, "enable parallel sweeping")
+DEFINE_bool(parallel_sweeping, true, "enable parallel sweeping")
 DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping")
 DEFINE_int(sweeper_threads, 1,
            "number of parallel and concurrent sweeping threads")
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 45a93f9..ca8eda1 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -33,8 +33,8 @@
 namespace v8 {
 namespace internal {
 
-HeapProfiler::HeapProfiler()
-    : snapshots_(new HeapSnapshotsCollection()),
+HeapProfiler::HeapProfiler(Heap* heap)
+    : snapshots_(new HeapSnapshotsCollection(heap)),
       next_snapshot_uid_(1) {
 }
 
@@ -45,15 +45,16 @@
 
 
 void HeapProfiler::ResetSnapshots() {
+  Heap* the_heap = heap();
   delete snapshots_;
-  snapshots_ = new HeapSnapshotsCollection();
+  snapshots_ = new HeapSnapshotsCollection(the_heap);
 }
 
 
 void HeapProfiler::SetUp() {
   Isolate* isolate = Isolate::Current();
   if (isolate->heap_profiler() == NULL) {
-    isolate->set_heap_profiler(new HeapProfiler());
+    isolate->set_heap_profiler(new HeapProfiler(isolate->heap()));
   }
 }
 
@@ -139,7 +140,7 @@
   bool generation_completed = true;
   switch (s_type) {
     case HeapSnapshot::kFull: {
-      HeapSnapshotGenerator generator(result, control, resolver);
+      HeapSnapshotGenerator generator(result, control, resolver, heap());
       generation_completed = generator.GenerateSnapshot();
       break;
     }
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index 9d3ba6f..c8c94f5 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -83,7 +83,7 @@
   }
 
  private:
-  HeapProfiler();
+  explicit HeapProfiler(Heap* heap);
   ~HeapProfiler();
   HeapSnapshot* TakeSnapshotImpl(
       const char* name,
@@ -101,6 +101,8 @@
   void StopHeapObjectsTrackingImpl();
   SnapshotObjectId PushHeapObjectsStatsImpl(OutputStream* stream);
 
+  Heap* heap() const { return snapshots_->heap(); }
+
   HeapSnapshotsCollection* snapshots_;
   unsigned next_snapshot_uid_;
   List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
diff --git a/src/heap.cc b/src/heap.cc
index 6637b9a..8cef1ae 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -449,7 +449,7 @@
 
 intptr_t Heap::SizeOfObjects() {
   intptr_t total = 0;
-  AllSpaces spaces;
+  AllSpaces spaces(this);
   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
     total += space->SizeOfObjects();
   }
@@ -458,7 +458,7 @@
 
 
 void Heap::RepairFreeListsAfterBoot() {
-  PagedSpaces spaces;
+  PagedSpaces spaces(this);
   for (PagedSpace* space = spaces.next();
        space != NULL;
        space = spaces.next()) {
@@ -5537,9 +5537,10 @@
 void Heap::Print() {
   if (!HasBeenSetUp()) return;
   isolate()->PrintStack();
-  AllSpaces spaces;
-  for (Space* space = spaces.next(); space != NULL; space = spaces.next())
+  AllSpaces spaces(this);
+  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
     space->Print();
+  }
 }
 
 
@@ -6167,7 +6168,7 @@
   *stats->os_error = OS::GetLastError();
       isolate()->memory_allocator()->Available();
   if (take_snapshot) {
-    HeapIterator iterator;
+    HeapIterator iterator(this);
     for (HeapObject* obj = iterator.next();
          obj != NULL;
          obj = iterator.next()) {
@@ -6412,7 +6413,7 @@
 
 void Heap::Shrink() {
   // Try to shrink all paged spaces.
-  PagedSpaces spaces;
+  PagedSpaces spaces(this);
   for (PagedSpace* space = spaces.next();
        space != NULL;
        space = spaces.next()) {
@@ -6485,19 +6486,19 @@
 Space* AllSpaces::next() {
   switch (counter_++) {
     case NEW_SPACE:
-      return HEAP->new_space();
+      return heap_->new_space();
     case OLD_POINTER_SPACE:
-      return HEAP->old_pointer_space();
+      return heap_->old_pointer_space();
     case OLD_DATA_SPACE:
-      return HEAP->old_data_space();
+      return heap_->old_data_space();
     case CODE_SPACE:
-      return HEAP->code_space();
+      return heap_->code_space();
     case MAP_SPACE:
-      return HEAP->map_space();
+      return heap_->map_space();
     case CELL_SPACE:
-      return HEAP->cell_space();
+      return heap_->cell_space();
     case LO_SPACE:
-      return HEAP->lo_space();
+      return heap_->lo_space();
     default:
       return NULL;
   }
@@ -6507,15 +6508,15 @@
 PagedSpace* PagedSpaces::next() {
   switch (counter_++) {
     case OLD_POINTER_SPACE:
-      return HEAP->old_pointer_space();
+      return heap_->old_pointer_space();
     case OLD_DATA_SPACE:
-      return HEAP->old_data_space();
+      return heap_->old_data_space();
     case CODE_SPACE:
-      return HEAP->code_space();
+      return heap_->code_space();
     case MAP_SPACE:
-      return HEAP->map_space();
+      return heap_->map_space();
     case CELL_SPACE:
-      return HEAP->cell_space();
+      return heap_->cell_space();
     default:
       return NULL;
   }
@@ -6526,26 +6527,28 @@
 OldSpace* OldSpaces::next() {
   switch (counter_++) {
     case OLD_POINTER_SPACE:
-      return HEAP->old_pointer_space();
+      return heap_->old_pointer_space();
     case OLD_DATA_SPACE:
-      return HEAP->old_data_space();
+      return heap_->old_data_space();
     case CODE_SPACE:
-      return HEAP->code_space();
+      return heap_->code_space();
     default:
       return NULL;
   }
 }
 
 
-SpaceIterator::SpaceIterator()
-    : current_space_(FIRST_SPACE),
+SpaceIterator::SpaceIterator(Heap* heap)
+    : heap_(heap),
+      current_space_(FIRST_SPACE),
       iterator_(NULL),
       size_func_(NULL) {
 }
 
 
-SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
-    : current_space_(FIRST_SPACE),
+SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
+    : heap_(heap),
+      current_space_(FIRST_SPACE),
       iterator_(NULL),
       size_func_(size_func) {
 }
@@ -6585,25 +6588,26 @@
 
   switch (current_space_) {
     case NEW_SPACE:
-      iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
+      iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
       break;
     case OLD_POINTER_SPACE:
-      iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
+      iterator_ =
+          new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
       break;
     case OLD_DATA_SPACE:
-      iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
+      iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
       break;
     case CODE_SPACE:
-      iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
+      iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
       break;
     case MAP_SPACE:
-      iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
+      iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
       break;
     case CELL_SPACE:
-      iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
+      iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
       break;
     case LO_SPACE:
-      iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
+      iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
       break;
   }
 
@@ -6674,15 +6678,18 @@
 };
 
 
-HeapIterator::HeapIterator()
-    : filtering_(HeapIterator::kNoFiltering),
+HeapIterator::HeapIterator(Heap* heap)
+    : heap_(heap),
+      filtering_(HeapIterator::kNoFiltering),
       filter_(NULL) {
   Init();
 }
 
 
-HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
-    : filtering_(filtering),
+HeapIterator::HeapIterator(Heap* heap,
+                           HeapIterator::HeapObjectsFiltering filtering)
+    : heap_(heap),
+      filtering_(filtering),
       filter_(NULL) {
   Init();
 }
@@ -6695,7 +6702,7 @@
 
 void HeapIterator::Init() {
   // Start the iteration.
-  space_iterator_ = new SpaceIterator;
+  space_iterator_ = new SpaceIterator(heap_);
   switch (filtering_) {
     case kFilterUnreachable:
       filter_ = new UnreachableObjectsFilter;
@@ -6960,9 +6967,9 @@
 #endif
 
 
-static intptr_t CountTotalHolesSize() {
+static intptr_t CountTotalHolesSize(Heap* heap) {
   intptr_t holes_size = 0;
-  OldSpaces spaces;
+  OldSpaces spaces(heap);
   for (OldSpace* space = spaces.next();
        space != NULL;
        space = spaces.next()) {
@@ -6998,7 +7005,7 @@
     scopes_[i] = 0;
   }
 
-  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
+  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
 
   allocated_since_last_gc_ =
       heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
@@ -7125,7 +7132,7 @@
     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
     PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
            in_free_list_or_wasted_before_gc_);
-    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
+    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
 
     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
diff --git a/src/heap.h b/src/heap.h
index 5eefc1c..42546af 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -2339,37 +2339,40 @@
 };
 
 
-// Space iterator for iterating over all spaces of the heap.
-// Returns each space in turn, and null when it is done.
+// Space iterator for iterating over all spaces of the heap.  Returns each space
+// in turn, and null when it is done.
 class AllSpaces BASE_EMBEDDED {
  public:
+  explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {}
   Space* next();
-  AllSpaces() { counter_ = FIRST_SPACE; }
  private:
+  Heap* heap_;
   int counter_;
 };
 
 
 // Space iterator for iterating over all old spaces of the heap: Old pointer
-// space, old data space and code space.
-// Returns each space in turn, and null when it is done.
+// space, old data space and code space.  Returns each space in turn, and null
+// when it is done.
 class OldSpaces BASE_EMBEDDED {
  public:
+  explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
   OldSpace* next();
-  OldSpaces() { counter_ = OLD_POINTER_SPACE; }
  private:
+  Heap* heap_;
   int counter_;
 };
 
 
-// Space iterator for iterating over all the paged spaces of the heap:
-// Map space, old pointer space, old data space, code space and cell space.
-// Returns each space in turn, and null when it is done.
+// Space iterator for iterating over all the paged spaces of the heap: Map
+// space, old pointer space, old data space, code space and cell space.  Returns
+// each space in turn, and null when it is done.
 class PagedSpaces BASE_EMBEDDED {
  public:
+  explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
   PagedSpace* next();
-  PagedSpaces() { counter_ = OLD_POINTER_SPACE; }
  private:
+  Heap* heap_;
   int counter_;
 };
 
@@ -2379,8 +2382,8 @@
 // returned object iterators is handled by the space iterator.
 class SpaceIterator : public Malloced {
  public:
-  SpaceIterator();
-  explicit SpaceIterator(HeapObjectCallback size_func);
+  explicit SpaceIterator(Heap* heap);
+  SpaceIterator(Heap* heap, HeapObjectCallback size_func);
   virtual ~SpaceIterator();
 
   bool has_next();
@@ -2389,6 +2392,7 @@
  private:
   ObjectIterator* CreateIterator();
 
+  Heap* heap_;
   int current_space_;  // from enum AllocationSpace.
   ObjectIterator* iterator_;  // object iterator for the current space.
   HeapObjectCallback size_func_;
@@ -2413,8 +2417,8 @@
     kFilterUnreachable
   };
 
-  HeapIterator();
-  explicit HeapIterator(HeapObjectsFiltering filtering);
+  explicit HeapIterator(Heap* heap);
+  HeapIterator(Heap* heap, HeapObjectsFiltering filtering);
   ~HeapIterator();
 
   HeapObject* next();
@@ -2427,6 +2431,7 @@
   void Shutdown();
   HeapObject* NextObject();
 
+  Heap* heap_;
   HeapObjectsFiltering filtering_;
   HeapObjectsFilter* filter_;
   // Space iterator for iterating all the spaces.
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 11cc901..2d4b82b 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -405,9 +405,10 @@
   if (dominator->block() != dominated->block()) {
     return dominator->block()->Dominates(dominated->block());
   } else {
-    // If both arguments are in the same block we check if "dominator" has
-    // already been processed or if it is a phi: if yes it dominates the other.
-    return dominator->CheckFlag(kIDefsProcessingDone) || dominator->IsPhi();
+    // If both arguments are in the same block we check if dominator is a phi
+    // or if dominated has not already been processed: in either case we know
+    // that dominator precedes dominated.
+    return dominator->IsPhi() || !dominated->CheckFlag(kIDefsProcessingDone);
   }
 }
 
@@ -524,6 +525,16 @@
 }
 
 
+bool HValue::IsInteger32Constant() {
+  return IsConstant() && HConstant::cast(this)->HasInteger32Value();
+}
+
+
+int32_t HValue::GetInteger32Constant() {
+  return HConstant::cast(this)->Integer32Value();
+}
+
+
 void HValue::SetOperandAt(int index, HValue* value) {
   RegisterUse(index, value);
   InternalSetOperandAt(index, value);
@@ -790,10 +801,48 @@
   // HValue::DataEquals.  The default implementation is UNREACHABLE.  We
   // don't actually care whether DataEquals returns true or false here.
   if (CheckFlag(kUseGVN)) DataEquals(this);
+
+  // Verify that all uses are in the graph.
+  for (HUseIterator use = uses(); !use.Done(); use.Advance()) {
+    if (use.value()->IsInstruction()) {
+      ASSERT(HInstruction::cast(use.value())->IsLinked());
+    }
+  }
 }
 #endif
 
 
+HNumericConstraint* HNumericConstraint::AddToGraph(
+    HValue* constrained_value,
+    NumericRelation relation,
+    HValue* related_value,
+    HInstruction* insertion_point) {
+  if (insertion_point == NULL) {
+    if (constrained_value->IsInstruction()) {
+      insertion_point = HInstruction::cast(constrained_value);
+    } else if (constrained_value->IsPhi()) {
+      insertion_point = constrained_value->block()->first();
+    } else {
+      UNREACHABLE();
+    }
+  }
+  HNumericConstraint* result =
+      new(insertion_point->block()->zone()) HNumericConstraint(
+          constrained_value, relation, related_value);
+  result->InsertAfter(insertion_point);
+  return result;
+}
+
+
+void HNumericConstraint::PrintDataTo(StringStream* stream) {
+  stream->Add("(");
+  constrained_value()->PrintNameTo(stream);
+  stream->Add(" %s ", relation().Mnemonic());
+  related_value()->PrintNameTo(stream);
+  stream->Add(")");
+}
+
+
 void HDummyUse::PrintDataTo(StringStream* stream) {
   value()->PrintNameTo(stream);
 }
@@ -815,10 +864,38 @@
 }
 
 
+void HBoundsCheck::AddInformativeDefinitions() {
+  // TODO(mmassi): Executing this code during AddInformativeDefinitions
+  // is a hack. Move it to some other HPhase.
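+  // An index already known to be in [0, length) makes the check redundant.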
+  if (index()->IsRelationTrue(NumericRelation::Ge(),
+                              block()->graph()->GetConstant0()) &&
+      index()->IsRelationTrue(NumericRelation::Lt(), length())) {
+    set_skip_check(true);
+  }
+}
+
+
+bool HBoundsCheck::IsRelationTrueInternal(NumericRelation relation,
+                                          HValue* related_value) {
+  if (related_value == length()) {
+    // A HBoundsCheck is smaller than the length it is compared against.
+    return NumericRelation::Lt().Implies(relation);
+  } else if (related_value == block()->graph()->GetConstant0()) {
+    // A HBoundsCheck is greater than or equal to zero.
+    return NumericRelation::Ge().Implies(relation);
+  } else {
+    return false;
+  }
+}
+
+
 void HBoundsCheck::PrintDataTo(StringStream* stream) {
   index()->PrintNameTo(stream);
   stream->Add(" ");
   length()->PrintNameTo(stream);
+  if (skip_check()) {
+    stream->Add(" [DISABLED]");
+  }
 }
 
 
@@ -829,9 +906,8 @@
       !length()->representation().IsTagged()) {
     r = Representation::Integer32();
   } else if (index()->representation().IsTagged() ||
-      (index()->IsConstant() &&
-       HConstant::cast(index())->HasInteger32Value() &&
-       Smi::IsValid(HConstant::cast(index())->Integer32Value()))) {
+      (index()->ActualValue()->IsConstant() &&
+       HConstant::cast(index()->ActualValue())->HasSmiValue())) {
     // If the index is tagged, or a constant that holds a Smi, allow the length
     // to be tagged, since it is usually already tagged from loading it out of
     // the length field of a JSArray. This allows for direct comparison without
@@ -1476,6 +1552,38 @@
 }
 
 
+void HPhi::AddInformativeDefinitions() {
+  if (OperandCount() == 2) {
+    for (int operand_index = 0; operand_index < 2; operand_index++) {
+      int other_operand_index = (operand_index + 1) % 2;
+
+      // Add an idef that "discards" the OSR entry block branch.
+      if (OperandAt(operand_index)->block()->is_osr_entry()) {
+        HNumericConstraint::AddToGraph(
+            this, NumericRelation::Eq(), OperandAt(other_operand_index));
+      }
+
+      static NumericRelation relations[] = {
+        NumericRelation::Ge(),
+        NumericRelation::Le()
+      };
+
+      // Check if this phi is an induction variable. If, e.g., we know that
+      // its first input is greater than the phi itself, then that must be
+      // the back edge, and the phi is always greater than its second input.
+      for (int relation_index = 0; relation_index < 2; relation_index++) {
+        if (OperandAt(operand_index)->IsRelationTrue(relations[relation_index],
+                                                     this)) {
+          HNumericConstraint::AddToGraph(this,
+                                         relations[relation_index],
+                                         OperandAt(other_operand_index));
+        }
+      }
+    }
+  }
+}
+
+
 Range* HMathMinMax::InferRange(Zone* zone) {
   if (representation().IsInteger32()) {
     Range* a = left()->range();
@@ -1937,6 +2045,16 @@
 }
 
 
+void HCompareIDAndBranch::AddInformativeDefinitions() {
+  NumericRelation r = NumericRelation::FromToken(token());
+  if (r.IsNone()) return;
+
+  HNumericConstraint::AddToGraph(left(), r, right(), SuccessorAt(0)->first());
+  HNumericConstraint::AddToGraph(
+        left(), r.Negated(), right(), SuccessorAt(1)->first());
+}
+
+
 void HCompareIDAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add(Token::Name(token()));
   stream->Add(" ");
@@ -2392,6 +2510,14 @@
 }
 
 
+void HCheckSmiOrInt32::InferRepresentation(HInferRepresentation* h_infer) {
+  ASSERT(CheckFlag(kFlexibleRepresentation));
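+  // Tagged inputs keep a tagged (Smi) check; everything else becomes int32.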
+  Representation r = value()->representation().IsTagged()
+      ? Representation::Tagged() : Representation::Integer32();
+  UpdateRepresentation(r, h_infer, "checksmiorint32");
+}
+
+
 HType HPhi::CalculateInferredType() {
   HType result = HType::Uninitialized();
   for (int i = 0; i < OperandCount(); ++i) {
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 53f1b65..2767560 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -92,6 +92,7 @@
   V(CheckNonSmi)                               \
   V(CheckPrototypeMaps)                        \
   V(CheckSmi)                                  \
+  V(CheckSmiOrInt32)                           \
   V(ClampToUint8)                              \
   V(ClassOfTestAndBranch)                      \
   V(CompareIDAndBranch)                        \
@@ -146,6 +147,7 @@
   V(MathMinMax)                                \
   V(Mod)                                       \
   V(Mul)                                       \
+  V(NumericConstraint)                         \
   V(ObjectLiteral)                             \
   V(OsrEntry)                                  \
   V(OuterContext)                              \
@@ -549,6 +551,127 @@
 #undef COUNT_FLAG
 };
 
+
+class NumericRelation {
+ public:
+  enum Kind { NONE, EQ, GT, GE, LT, LE, NE };
+  static const char* MnemonicFromKind(Kind kind) {
+    switch (kind) {
+      case NONE: return "NONE";
+      case EQ: return "EQ";
+      case GT: return "GT";
+      case GE: return "GE";
+      case LT: return "LT";
+      case LE: return "LE";
+      case NE: return "NE";
+    }
+    UNREACHABLE();
+    return NULL;
+  }
+  const char* Mnemonic() const { return MnemonicFromKind(kind_); }
+
+  static NumericRelation None() { return NumericRelation(NONE); }
+  static NumericRelation Eq() { return NumericRelation(EQ); }
+  static NumericRelation Gt() { return NumericRelation(GT); }
+  static NumericRelation Ge() { return NumericRelation(GE); }
+  static NumericRelation Lt() { return NumericRelation(LT); }
+  static NumericRelation Le() { return NumericRelation(LE); }
+  static NumericRelation Ne() { return NumericRelation(NE); }
+
+  bool IsNone() { return kind_ == NONE; }
+
+  static NumericRelation FromToken(Token::Value token) {
+    switch (token) {
+      case Token::EQ: return Eq();
+      case Token::EQ_STRICT: return Eq();
+      case Token::LT: return Lt();
+      case Token::GT: return Gt();
+      case Token::LTE: return Le();
+      case Token::GTE: return Ge();
+      case Token::NE: return Ne();
+      case Token::NE_STRICT: return Ne();
+      default: return None();
+    }
+  }
+
+  // The semantics of "Reversed" is that if "x rel y" is true then also
+  // "y rel.Reversed() x" is true, and that rel.Reversed().Reversed() == rel.
+  NumericRelation Reversed() {
+    switch (kind_) {
+      case NONE: return None();
+      case EQ: return Eq();
+      case GT: return Lt();
+      case GE: return Le();
+      case LT: return Gt();
+      case LE: return Ge();
+      case NE: return Ne();
+    }
+    UNREACHABLE();
+    return None();
+  }
+
+  // The semantics of "Negated" is that if "x rel y" is true then also
+  // "!(x rel.Negated() y)" is true.
+  NumericRelation Negated() {
+    switch (kind_) {
+      case NONE: return None();
+      case EQ: return Ne();
+      case GT: return Le();
+      case GE: return Lt();
+      case LT: return Ge();
+      case LE: return Gt();
+      case NE: return Eq();
+    }
+    UNREACHABLE();
+    return None();
+  }
+
+  // The semantics of "Implies" is that if "x rel y" is true
+  // then also "x other_relation y" is true.
+  bool Implies(NumericRelation other_relation) {
+    switch (kind_) {
+      case NONE: return false;
+      case EQ: return (other_relation.kind_ == EQ)
+          || (other_relation.kind_ == GE)
+          || (other_relation.kind_ == LE);
+      case GT: return (other_relation.kind_ == GT)
+          || (other_relation.kind_ == GE)
+          || (other_relation.kind_ == NE);
+      case LT: return (other_relation.kind_ == LT)
+          || (other_relation.kind_ == LE)
+          || (other_relation.kind_ == NE);
+      case GE: return (other_relation.kind_ == GE);
+      case LE: return (other_relation.kind_ == LE);
+      case NE: return (other_relation.kind_ == NE);
+    }
+    UNREACHABLE();
+    return false;
+  }
+
+  // The semantics of "IsExtendable" is that if
+  // "rel.IsExtendable(direction)" is true then
+  // "x rel y" implies "(x + direction) rel y" .
+  bool IsExtendable(int direction) {
+    switch (kind_) {
+      case NONE: return false;
+      case EQ: return false;
+      case GT: return (direction >= 0);
+      case GE: return (direction >= 0);
+      case LT: return (direction <= 0);
+      case LE: return (direction <= 0);
+      case NE: return false;
+    }
+    UNREACHABLE();
+    return false;
+  }
+
+ private:
+  explicit NumericRelation(Kind kind) : kind_(kind) {}
+
+  Kind kind_;
+};
+
+
 typedef EnumSet<GVNFlag> GVNFlagSet;
 
 
@@ -712,6 +835,9 @@
     UpdateRedefinedUsesInner<Dominates>();
   }
 
+  bool IsInteger32Constant();
+  int32_t GetInteger32Constant();
+
   bool IsDefinedAfter(HBasicBlock* other) const;
 
   // Operands.
@@ -838,6 +964,24 @@
   virtual void Verify() = 0;
 #endif
 
+  // This method is recursive but it is guaranteed to terminate because
+  // RedefinedOperand() always dominates "this".
+  bool IsRelationTrue(NumericRelation relation, HValue* other) {
+    if (this == other) {
+      return NumericRelation::Eq().Implies(relation);
+    }
+
+    bool result = IsRelationTrueInternal(relation, other) ||
+        other->IsRelationTrueInternal(relation.Reversed(), this);
+    if (!result) {
+      HValue* redefined = RedefinedOperand();
+      if (redefined != NULL) {
+        result = redefined->IsRelationTrue(relation, other);
+      }
+    }
+    return result;
+  }
+
  protected:
   // This function must be overridden for instructions with flag kUseGVN, to
   // compare the non-Operand parts of the instruction.
@@ -900,6 +1044,12 @@
     }
   }
 
+  // Informative definitions can override this method to state any numeric
+  // relation they provide on the redefined value.
+  virtual bool IsRelationTrueInternal(NumericRelation relation, HValue* other) {
+    return false;
+  }
+
   static GVNFlagSet AllDependsOnFlagSet() {
     GVNFlagSet result;
     // Create changes mask.
@@ -1124,6 +1274,50 @@
 };
 
 
+class HNumericConstraint : public HTemplateInstruction<2> {
+ public:
+  static HNumericConstraint* AddToGraph(HValue* constrained_value,
+                                        NumericRelation relation,
+                                        HValue* related_value,
+                                        HInstruction* insertion_point = NULL);
+
+  HValue* constrained_value() { return OperandAt(0); }
+  HValue* related_value() { return OperandAt(1); }
+  NumericRelation relation() { return relation_; }
+
+  virtual int RedefinedOperandIndex() { return 0; }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return representation();
+  }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual bool IsRelationTrueInternal(NumericRelation other_relation,
+                                      HValue* other_related_value) {
+    if (related_value() == other_related_value) {
+      return relation().Implies(other_relation);
+    } else {
+      return false;
+    }
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumericConstraint)
+
+ private:
+  HNumericConstraint(HValue* constrained_value,
+                     NumericRelation relation,
+                     HValue* related_value)
+      : relation_(relation) {
+    SetOperandAt(0, constrained_value);
+    SetOperandAt(1, related_value);
+    set_representation(constrained_value->representation());
+  }
+
+  NumericRelation relation_;
+};
+
+
 // We insert soft-deoptimize when we hit code with unknown typefeedback,
 // so that we get a chance of re-optimizing with useful typefeedback.
 // HSoftDeoptimize does not end a basic block as opposed to HDeoptimize.
@@ -1545,7 +1739,7 @@
     // The stack check eliminator might try to eliminate the same stack
     // check instruction multiple times.
     if (IsLinked()) {
-      DeleteFromGraph();
+      DeleteAndReplaceWith(NULL);
     }
   }
 
@@ -2612,6 +2806,34 @@
 };
 
 
+class HCheckSmiOrInt32: public HUnaryOperation {
+ public:
+  explicit HCheckSmiOrInt32(HValue* value) : HUnaryOperation(value) {
+    SetFlag(kFlexibleRepresentation);
+    SetFlag(kUseGVN);
+  }
+
+  virtual int RedefinedOperandIndex() { return 0; }
+  virtual Representation RequiredInputRepresentation(int index) {
+    return representation();
+  }
+  virtual void InferRepresentation(HInferRepresentation* h_infer);
+
+  virtual HValue* Canonicalize() {
+    if (representation().IsTagged() && !type().IsSmi()) {
+      return this;
+    } else {
+      return value();
+    }
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmiOrInt32)
+
+ protected:
+  virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
 class HPhi: public HValue {
  public:
   HPhi(int merged_index, Zone* zone)
@@ -2648,6 +2870,8 @@
 
   int merged_index() const { return merged_index_; }
 
+  virtual void AddInformativeDefinitions();
+
   virtual void PrintTo(StringStream* stream);
 
 #ifdef DEBUG
@@ -2807,6 +3031,9 @@
     ASSERT(HasInteger32Value());
     return int32_value_;
   }
+  bool HasSmiValue() const {
+    return HasInteger32Value() && Smi::IsValid(Integer32Value());
+  }
   bool HasDoubleValue() const { return has_double_value_; }
   double DoubleValue() const {
     ASSERT(HasDoubleValue());
@@ -3088,15 +3315,21 @@
 
 class HBoundsCheck: public HTemplateInstruction<2> {
  public:
-  HBoundsCheck(HValue* index, HValue* length,
+  // Normally HBoundsCheck should be created using the
+  // HGraphBuilder::AddBoundsCheck() helper, which also guards the index with
+  // a HCheckSmiOrInt32 check.
+  // However, when building stubs, where we know that the arguments are Int32,
+  // it makes sense to invoke this constructor directly.
+  HBoundsCheck(HValue* index,
+               HValue* length,
                BoundsCheckKeyMode key_mode = DONT_ALLOW_SMI_KEY,
                Representation r = Representation::None())
-      : key_mode_(key_mode) {
+      : key_mode_(key_mode), skip_check_(false) {
     SetOperandAt(0, index);
     SetOperandAt(1, length);
     if (r.IsNone()) {
       // In the normal compilation pipeline the representation is flexible
-      // (see comment to RequiredInputRepresentation).
+      // (see InferRepresentation).
       SetFlag(kFlexibleRepresentation);
     } else {
       // When compiling stubs we want to set the representation explicitly
@@ -3106,6 +3339,9 @@
     SetFlag(kUseGVN);
   }
 
+  bool skip_check() { return skip_check_; }
+  void set_skip_check(bool skip_check) { skip_check_ = skip_check; }
+
   virtual Representation RequiredInputRepresentation(int arg_index) {
     return representation();
   }
@@ -3113,6 +3349,9 @@
     return Representation::Integer32();
   }
 
+  virtual bool IsRelationTrueInternal(NumericRelation relation,
+                                      HValue* related_value);
+
   virtual void PrintDataTo(StringStream* stream);
   virtual void InferRepresentation(HInferRepresentation* h_infer);
 
@@ -3120,12 +3359,14 @@
   HValue* length() { return OperandAt(1); }
 
   virtual int RedefinedOperandIndex() { return 0; }
+  virtual void AddInformativeDefinitions();
 
   DECLARE_CONCRETE_INSTRUCTION(BoundsCheck)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
   BoundsCheckKeyMode key_mode_;
+  bool skip_check_;
 };
 
 
@@ -3295,6 +3536,8 @@
   }
   virtual void PrintDataTo(StringStream* stream);
 
+  virtual void AddInformativeDefinitions();
+
   DECLARE_CONCRETE_INSTRUCTION(CompareIDAndBranch)
 
  private:
@@ -3699,6 +3942,23 @@
 
   virtual HValue* Canonicalize();
 
+  virtual bool IsRelationTrueInternal(NumericRelation relation, HValue* other) {
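+    // An add with a constant operand offsets its base value: if extending by
+    // the offset preserves the relation, inherit it from the base operand.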
+    HValue* base = NULL;
+    int32_t offset = 0;
+    if (left()->IsInteger32Constant()) {
+      base = right();
+      offset = left()->GetInteger32Constant();
+    } else if (right()->IsInteger32Constant()) {
+      base = left();
+      offset = right()->GetInteger32Constant();
+    } else {
+      return false;
+    }
+
+    return relation.IsExtendable(offset)
+        ? base->IsRelationTrue(relation, other) : false;
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(Add)
 
  protected:
@@ -3724,6 +3984,17 @@
                                HValue* left,
                                HValue* right);
 
+  virtual bool IsRelationTrueInternal(NumericRelation relation, HValue* other) {
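+    // Subtracting a constant shifts the base by -offset; see HAdd above.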
+    if (right()->IsInteger32Constant()) {
+      HValue* base = left();
+      int32_t offset = right()->GetInteger32Constant();
+      return relation.IsExtendable(-offset)
+          ? base->IsRelationTrue(relation, other) : false;
+    } else {
+      return false;
+    }
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(Sub)
 
  protected:
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 00207c1..e242c38 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -71,7 +71,8 @@
       parent_loop_header_(NULL),
       is_inline_return_target_(false),
       is_deoptimizing_(false),
-      dominates_loop_successors_(false) { }
+      dominates_loop_successors_(false),
+      is_osr_entry_(false) { }
 
 
 void HBasicBlock::AttachLoopInformation() {
@@ -779,6 +780,20 @@
 }
 
 
+HBoundsCheck* HGraphBuilder::AddBoundsCheck(HValue* index,
+                                            HValue* length,
+                                            BoundsCheckKeyMode key_mode,
+                                            Representation r) {
+  HCheckSmiOrInt32* checked_index =
+      new(graph()->zone()) HCheckSmiOrInt32(index);
+  AddInstruction(checked_index);
+  HBoundsCheck* result = new(graph()->zone()) HBoundsCheck(
+      checked_index, length, key_mode, r);
+  AddInstruction(result);
+  return result;
+}
+
+
 HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
   HBasicBlock* b = graph()->CreateBasicBlock();
   b->SetInitialEnvironment(env);
@@ -918,8 +933,8 @@
   HInstruction* checked_key = NULL;
   if (IsExternalArrayElementsKind(elements_kind)) {
     length = AddInstruction(new(zone) HFixedArrayBaseLength(elements));
-    checked_key = AddInstruction(new(zone) HBoundsCheck(
-        key, length, ALLOW_SMI_KEY, checked_index_representation));
+    checked_key = AddBoundsCheck(
+        key, length, ALLOW_SMI_KEY, checked_index_representation);
     HLoadExternalArrayPointer* external_elements =
         new(zone) HLoadExternalArrayPointer(elements);
     AddInstruction(external_elements);
@@ -936,8 +951,8 @@
   } else {
     length = AddInstruction(new(zone) HFixedArrayBaseLength(elements));
   }
-  checked_key = AddInstruction(new(zone) HBoundsCheck(
-      key, length, ALLOW_SMI_KEY, checked_index_representation));
+  checked_key = AddBoundsCheck(
+      key, length, ALLOW_SMI_KEY, checked_index_representation);
   return BuildFastElementAccess(elements, checked_key, val, mapcheck,
                                 elements_kind, is_store);
 }
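
The new AddBoundsCheck helper fixes the emission order once and for all: the index is first pinned to smi-or-int32, and the bounds check consumes the checked value. A schematic of that contract using a hypothetical mini-builder, not the real HGraphBuilder:

    #include <iostream>
    #include <string>
    #include <vector>

    struct Builder {
      std::vector<std::string> code;
      int Emit(const std::string& s) {
        code.push_back(s);
        return static_cast<int>(code.size()) - 1;
      }
      // Always guards the index before checking it against the length.
      int AddBoundsCheck(int index, int length) {
        int checked = Emit("CheckSmiOrInt32 v" + std::to_string(index));
        return Emit("BoundsCheck v" + std::to_string(checked) +
                    ", v" + std::to_string(length));
      }
    };

    int main() {
      Builder b;
      int index = b.Emit("Parameter 0");
      int length = b.Emit("ArrayLength");
      b.AddBoundsCheck(index, length);
      for (const std::string& line : b.code) std::cout << line << "\n";
    }
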
@@ -1812,7 +1827,7 @@
     if (test->SecondSuccessor() == dest) {
       op = Token::NegateCompareOp(op);
     }
-    Token::Value inverted_op = Token::InvertCompareOp(op);
+    Token::Value inverted_op = Token::ReverseCompareOp(op);
     UpdateControlFlowRange(op, test->left(), test->right());
     UpdateControlFlowRange(inverted_op, test->right(), test->left());
   }
@@ -2706,7 +2721,8 @@
           map->Add(instr, zone());
         }
       }
-      if (instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
+      if (instr->IsLinked() &&
+          instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
         for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
           HValue* other = dominators->at(i);
           GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
@@ -3779,6 +3795,12 @@
   OrderBlocks();
   AssignDominators();
 
+  // We need to create the HConstant "zero" now so that GVN will fold every
+  // zero-valued constant in the graph together.
+  // The constant is needed to make the idef-based bounds check analysis
+  // work: the pass evaluates relations against "zero", and that zero cannot
+  // be created after GVN has run.
+  GetConstant0();
+
 #ifdef DEBUG
   // Do a full verify after building the graph and computing dominators.
   Verify(true);
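
Creating the zero constant before GVN matters because GVN canonicalizes equal pure values to a single node, and the later bounds-check pass compares against that one node by identity. A toy value-numbering table showing the folding, hypothetical types throughout:

    #include <cassert>
    #include <map>

    struct Node { int value; };

    struct ValueNumbering {
      std::map<int, Node*> table;
      Node* Canonical(int v) {
        auto it = table.find(v);
        if (it != table.end()) return it->second;  // reuse the existing node
        return table[v] = new Node{v};
      }
    };

    int main() {
      ValueNumbering gvn;
      // Every zero-valued constant folds to one node, so pointer compares work.
      assert(gvn.Canonical(0) == gvn.Canonical(0));
    }
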
@@ -3856,12 +3878,14 @@
   for (int phi_index = 0; phi_index < block->phis()->length(); phi_index++) {
     HPhi* phi = block->phis()->at(phi_index);
     phi->AddInformativeDefinitions();
+    phi->SetFlag(HValue::kIDefsProcessingDone);
     // We do not support phis that "redefine just one operand".
     ASSERT(!phi->IsInformativeDefinition());
   }
 
   for (HInstruction* i = block->first(); i != NULL; i = i->next()) {
     i->AddInformativeDefinitions();
+    i->SetFlag(HValue::kIDefsProcessingDone);
     i->UpdateRedefinedUsesWhileSettingUpInformativeDefinitions();
   }
 }
@@ -4882,6 +4906,7 @@
   non_osr_entry->Goto(loop_predecessor);
 
   set_current_block(osr_entry);
+  osr_entry->set_osr_entry();
   BailoutId osr_entry_id = statement->OsrEntryId();
   int first_expression_index = environment()->first_expression_index();
   int length = environment()->length();
@@ -6833,7 +6858,7 @@
         && todo_external_array) {
       HInstruction* length =
           AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
-      checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+      checked_key = AddBoundsCheck(key, length);
       external_elements = new(zone()) HLoadExternalArrayPointer(elements);
       AddInstruction(external_elements);
     }
@@ -6875,8 +6900,7 @@
         HInstruction* length;
         length = AddInstruction(new(zone()) HJSArrayLength(object, typecheck,
                                                            HType::Smi()));
-        checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
-                                                              ALLOW_SMI_KEY));
+        checked_key = AddBoundsCheck(key, length, ALLOW_SMI_KEY);
         access = AddInstruction(BuildFastElementAccess(
             elements, checked_key, val, elements_kind_branch,
             elements_kind, is_store));
@@ -6892,8 +6916,7 @@
 
         set_current_block(if_fastobject);
         length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
-        checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
-                                                              ALLOW_SMI_KEY));
+        checked_key = AddBoundsCheck(key, length, ALLOW_SMI_KEY);
         access = AddInstruction(BuildFastElementAccess(
             elements, checked_key, val, elements_kind_branch,
             elements_kind, is_store));
@@ -7042,8 +7065,7 @@
           new(zone()) HArgumentsElements(false));
       HInstruction* length = AddInstruction(
           new(zone()) HArgumentsLength(elements));
-      HInstruction* checked_key =
-          AddInstruction(new(zone()) HBoundsCheck(key, length));
+      HInstruction* checked_key = AddBoundsCheck(key, length);
       result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
     } else {
       EnsureArgumentsArePushedForAccess();
@@ -7055,8 +7077,7 @@
       HInstruction* length = AddInstruction(new(zone()) HConstant(
         Handle<Object>(Smi::FromInt(argument_count)),
         Representation::Integer32()));
-      HInstruction* checked_key =
-          AddInstruction(new(zone()) HBoundsCheck(key, length));
+      HInstruction* checked_key = AddBoundsCheck(key, length);
       result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
     }
   }
@@ -8770,8 +8791,7 @@
   AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
   HStringLength* length = new(zone()) HStringLength(string);
   AddInstruction(length);
-  HInstruction* checked_index =
-      AddInstruction(new(zone()) HBoundsCheck(index, length));
+  HInstruction* checked_index = AddBoundsCheck(index, length);
   return new(zone()) HStringCharCodeAt(context, string, checked_index);
 }
 
@@ -9599,8 +9619,7 @@
   HInstruction* elements = AddInstruction(
       new(zone()) HArgumentsElements(false));
   HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
-  HInstruction* checked_index =
-      AddInstruction(new(zone()) HBoundsCheck(index, length));
+  HInstruction* checked_index = AddBoundsCheck(index, length);
   HAccessArgumentsAt* result =
       new(zone()) HAccessArgumentsAt(elements, length, checked_index);
   return ast_context()->ReturnInstruction(result, call->id());
diff --git a/src/hydrogen.h b/src/hydrogen.h
index e9b8a2f..180882e 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -91,6 +91,8 @@
   void set_last_instruction_index(int index) {
     last_instruction_index_ = index;
   }
+  bool is_osr_entry() { return is_osr_entry_; }
+  void set_osr_entry() { is_osr_entry_ = true; }
 
   void AttachLoopInformation();
   void DetachLoopInformation();
@@ -193,6 +195,7 @@
   bool is_inline_return_target_;
   bool is_deoptimizing_;
   bool dominates_loop_successors_;
+  bool is_osr_entry_;
 };
 
 
@@ -875,6 +878,11 @@
   HInstruction* AddInstruction(HInstruction* instr);
   void AddSimulate(BailoutId id,
                    RemovableSimulate removable = FIXED_SIMULATE);
+  HBoundsCheck* AddBoundsCheck(
+      HValue* index,
+      HValue* length,
+      BoundsCheckKeyMode key_mode = DONT_ALLOW_SMI_KEY,
+      Representation r = Representation::None());
 
  protected:
   virtual bool BuildGraph() = 0;
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index d9cd90d..5affd6e 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -747,25 +747,14 @@
                                  Label* non_float,
                                  Register scratch);
 
-  // Checks that the two floating point numbers on top of the FPU stack
-  // have int32 values.
-  static void CheckFloatOperandsAreInt32(MacroAssembler* masm,
-                                         Label* non_int32);
-
   // Takes the operands in edx and eax and loads them as integers in eax
   // and ecx.
   static void LoadUnknownsAsIntegers(MacroAssembler* masm,
                                      bool use_sse3,
+                                     BinaryOpIC::TypeInfo left_type,
+                                     BinaryOpIC::TypeInfo right_type,
                                      Label* operand_conversion_failure);
 
-  // Must only be called after LoadUnknownsAsIntegers.  Assumes that the
-  // operands are pushed on the stack, and that their conversions to int32
-  // are in eax and ecx.  Checks that the original numbers were in the int32
-  // range.
-  static void CheckLoadedIntegersWereInt32(MacroAssembler* masm,
-                                           bool use_sse3,
-                                           Label* not_int32);
-
   // Assumes that operands are smis or heap numbers and loads them
   // into xmm0 and xmm1. Operands are in edx and eax.
   // Leaves operands unchanged.
@@ -787,9 +776,12 @@
                                         Label* non_int32,
                                         Register scratch);
 
+  // Checks that |operand| holds an int32 value. On success the value is left
+  // in |int32_result|, unless |int32_result| aliases |scratch|, which is
+  // clobbered by the check itself.
   static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
                                       Label* non_int32,
                                       XMMRegister operand,
+                                      Register int32_result,
                                       Register scratch,
                                       XMMRegister xmm_scratch);
 };
@@ -923,6 +915,18 @@
 }
 
 
+// Uses SSE2 to convert the heap number in |source| to an integer. Jumps to
+// |conversion_failure| if the heap number did not contain an int32 value.
+// Result is in ecx. Trashes ebx, xmm0, and xmm1.
+static void ConvertHeapNumberToInt32(MacroAssembler* masm,
+                                     Register source,
+                                     Label* conversion_failure) {
+  __ movdbl(xmm0, FieldOperand(source, HeapNumber::kValueOffset));
+  FloatingPointHelper::CheckSSE2OperandIsInt32(
+      masm, conversion_failure, xmm0, ecx, ebx, xmm1);
+}
+
+
 void UnaryOpStub::PrintName(StringStream* stream) {
   const char* op_name = Token::Name(op_);
   const char* overwrite_name = NULL;  // Make g++ happy.
@@ -1797,7 +1801,7 @@
           // Check result type if it is currently Int32.
           if (result_type_ <= BinaryOpIC::INT32) {
             FloatingPointHelper::CheckSSE2OperandIsInt32(
-                masm, &not_int32, xmm0, ecx, xmm2);
+                masm, &not_int32, xmm0, ecx, ecx, xmm2);
           }
           BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@@ -1809,7 +1813,6 @@
             masm,
             ecx,
             FloatingPointHelper::ARGS_IN_REGISTERS);
-        FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
         if (op_ == Token::MOD) {
           // The operands are now on the FPU stack, but we don't need them.
           __ fstp(0);
@@ -1851,15 +1854,9 @@
       Label not_floats;
       Label not_int32;
       Label non_smi_result;
-      // We do not check the input arguments here, as any value is
-      // unconditionally truncated to an int32 anyway. To get the
-      // right optimized code, int32 type feedback is just right.
       bool use_sse3 = platform_specific_bit_;
-      FloatingPointHelper::LoadUnknownsAsIntegers(masm,
-                                                  use_sse3,
-                                                  &not_floats);
-      FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3,
-                                                        &not_int32);
+      FloatingPointHelper::LoadUnknownsAsIntegers(
+          masm, use_sse3, left_type_, right_type_, &not_floats);
       switch (op_) {
         case Token::BIT_OR:  __ or_(eax, ecx); break;
         case Token::BIT_AND: __ and_(eax, ecx); break;
@@ -2013,11 +2010,11 @@
         FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
         if (left_type_ == BinaryOpIC::INT32) {
           FloatingPointHelper::CheckSSE2OperandIsInt32(
-              masm, &not_floats, xmm0, ecx, xmm2);
+              masm, &not_floats, xmm0, ecx, ecx, xmm2);
         }
         if (right_type_ == BinaryOpIC::INT32) {
           FloatingPointHelper::CheckSSE2OperandIsInt32(
-              masm, &not_floats, xmm1, ecx, xmm2);
+              masm, &not_floats, xmm1, ecx, ecx, xmm2);
         }
 
         switch (op_) {
@@ -2075,9 +2072,8 @@
       // unconditionally truncated to an int32 anyway. To get the
       // right optimized code, int32 type feedback is just right.
       bool use_sse3 = platform_specific_bit_;
-      FloatingPointHelper::LoadUnknownsAsIntegers(masm,
-                                                  use_sse3,
-                                                  &not_floats);
+      FloatingPointHelper::LoadUnknownsAsIntegers(
+          masm, use_sse3, left_type_, right_type_, &not_floats);
       switch (op_) {
         case Token::BIT_OR:  __ or_(eax, ecx); break;
         case Token::BIT_AND: __ and_(eax, ecx); break;
@@ -2256,6 +2252,8 @@
       bool use_sse3 = platform_specific_bit_;
       FloatingPointHelper::LoadUnknownsAsIntegers(masm,
                                                   use_sse3,
+                                                  BinaryOpIC::GENERIC,
+                                                  BinaryOpIC::GENERIC,
                                                   &call_runtime);
       switch (op_) {
         case Token::BIT_OR:  __ or_(eax, ecx); break;
@@ -2736,16 +2734,24 @@
 
 // Input: edx, eax are the left and right objects of a bit op.
 // Output: eax, ecx are left and right integers for a bit op.
-void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
-                                                 bool use_sse3,
-                                                 Label* conversion_failure) {
+// Warning: can clobber inputs even when it jumps to |conversion_failure|!
+void FloatingPointHelper::LoadUnknownsAsIntegers(
+    MacroAssembler* masm,
+    bool use_sse3,
+    BinaryOpIC::TypeInfo left_type,
+    BinaryOpIC::TypeInfo right_type,
+    Label* conversion_failure) {
   // Check float operands.
   Label arg1_is_object, check_undefined_arg1;
   Label arg2_is_object, check_undefined_arg2;
   Label load_arg2, done;
 
   // Test if arg1 is a Smi.
-  __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
+  if (left_type == BinaryOpIC::SMI) {
+    __ JumpIfNotSmi(edx, conversion_failure);
+  } else {
+    __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
+  }
 
   __ SmiUntag(edx);
   __ jmp(&load_arg2);
@@ -2764,14 +2770,23 @@
   __ j(not_equal, &check_undefined_arg1);
 
   // Get the untagged integer version of the edx heap number in ecx.
-  IntegerConvert(masm, edx, use_sse3, conversion_failure);
+  if (left_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    ConvertHeapNumberToInt32(masm, edx, conversion_failure);
+  } else {
+    IntegerConvert(masm, edx, use_sse3, conversion_failure);
+  }
   __ mov(edx, ecx);
 
   // Here edx has the untagged integer, eax has a Smi or a heap number.
   __ bind(&load_arg2);
 
   // Test if arg2 is a Smi.
-  __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
+  if (right_type == BinaryOpIC::SMI) {
+    __ JumpIfNotSmi(eax, conversion_failure);
+  } else {
+    __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
+  }
 
   __ SmiUntag(eax);
   __ mov(ecx, eax);
@@ -2788,21 +2803,20 @@
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   __ cmp(ebx, factory->heap_number_map());
   __ j(not_equal, &check_undefined_arg2);
-
   // Get the untagged integer version of the eax heap number in ecx.
-  IntegerConvert(masm, eax, use_sse3, conversion_failure);
+  if (right_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    ConvertHeapNumberToInt32(masm, eax, conversion_failure);
+  } else {
+    IntegerConvert(masm, eax, use_sse3, conversion_failure);
+  }
+
   __ bind(&done);
   __ mov(eax, edx);
 }
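
A C++ restatement of the dispatch that the new left_type/right_type parameters enable: SMI feedback turns a non-smi input into an immediate bailout, INT32 feedback (with SSE2) applies the strict int32 check, and anything else falls through to the generic truncating conversion. Illustrative only; the real code emits ia32 assembly and implements the full ToInt32 wrap-around.

    #include <cstdint>

    enum TypeInfo { SMI, INT32, GENERIC };

    struct Input { bool is_smi; double number; };

    // Returns false to model a jump to |conversion_failure|.
    static bool LoadAsInteger(Input in, TypeInfo feedback, int32_t* out) {
      if (in.is_smi) { *out = static_cast<int32_t>(in.number); return true; }
      if (feedback == SMI) return false;  // feedback violated: bail out at once
      if (!(in.number >= -2147483648.0 && in.number <= 2147483647.0)) {
        if (feedback == INT32) return false;  // strict check fails
        in.number = 0;  // sketch only: real code wraps modulo 2^32 (ToInt32)
      }
      int32_t truncated = static_cast<int32_t>(in.number);
      if (feedback == INT32 && static_cast<double>(truncated) != in.number) {
        return false;  // heap number was not a true int32
      }
      *out = truncated;  // GENERIC: truncation always succeeds
      return true;
    }
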
 
 
-void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
-                                                       bool use_sse3,
-                                                       Label* not_int32) {
-  return;
-}
-
-
 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
                                            Register number) {
   Label load_smi, done;
@@ -2897,18 +2911,19 @@
 void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
                                                     Label* non_int32,
                                                     Register scratch) {
-  CheckSSE2OperandIsInt32(masm, non_int32, xmm0, scratch, xmm2);
-  CheckSSE2OperandIsInt32(masm, non_int32, xmm1, scratch, xmm2);
+  CheckSSE2OperandIsInt32(masm, non_int32, xmm0, scratch, scratch, xmm2);
+  CheckSSE2OperandIsInt32(masm, non_int32, xmm1, scratch, scratch, xmm2);
 }
 
 
 void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
                                                   Label* non_int32,
                                                   XMMRegister operand,
+                                                  Register int32_result,
                                                   Register scratch,
                                                   XMMRegister xmm_scratch) {
-  __ cvttsd2si(scratch, Operand(operand));
-  __ cvtsi2sd(xmm_scratch, scratch);
+  __ cvttsd2si(int32_result, Operand(operand));
+  __ cvtsi2sd(xmm_scratch, int32_result);
   __ pcmpeqd(xmm_scratch, operand);
   __ movmskps(scratch, xmm_scratch);
   // Two least significant bits should be both set.
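
The sequence above is a round-trip check: cvttsd2si truncates, cvtsi2sd converts back, and pcmpeqd compares the two doubles bitwise, so -0.0 and out-of-range inputs (which truncate to the 0x80000000 indefinite value) both fail. A standalone restatement of the same idea, using memcmp for the bitwise compare:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static bool IsInt32(double v, int32_t* out) {
      if (!(v >= -2147483648.0 && v <= 2147483647.0)) return false;  // avoid UB
      int32_t truncated = static_cast<int32_t>(v);   // cvttsd2si
      double back = static_cast<double>(truncated);  // cvtsi2sd
      // pcmpeqd is a bitwise compare, so -0.0 != +0.0 here, unlike operator==.
      if (std::memcmp(&back, &v, sizeof v) != 0) return false;
      *out = truncated;
      return true;
    }

    int main() {
      int32_t r;
      assert(IsInt32(41.0, &r) && r == 41);
      assert(!IsInt32(41.5, &r));
      assert(!IsInt32(-0.0, &r));  // bit pattern of -0.0 differs from 0
    }
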
@@ -2998,12 +3013,6 @@
 }
 
 
-void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm,
-                                                     Label* non_int32) {
-  return;
-}
-
-
 void MathPowStub::Generate(MacroAssembler* masm) {
   CpuFeatures::Scope use_sse2(SSE2);
   Factory* factory = masm->isolate()->factory();
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 05152b7..a1b467d 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -860,44 +860,38 @@
     __ popfd();
   }
 
+  if (FLAG_trap_on_deopt) {
+    Label done;
+    if (cc != no_condition) {
+      __ j(NegateCondition(cc), &done, Label::kNear);
+    }
+    __ int3();
+    __ bind(&done);
+  }
+
   ASSERT(info()->IsStub() || frame_is_built_);
-  bool lazy_deopt_needed = info()->IsStub();
-  if (cc == no_condition) {
-    if (FLAG_trap_on_deopt) __ int3();
-    if (lazy_deopt_needed) {
+  bool needs_lazy_deopt = info()->IsStub();
+  if (cc == no_condition && frame_is_built_) {
+    if (needs_lazy_deopt) {
       __ call(entry, RelocInfo::RUNTIME_ENTRY);
     } else {
       __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
     }
   } else {
-    Label done;
-    if (FLAG_trap_on_deopt) {
-      __ j(NegateCondition(cc), &done, Label::kNear);
-      __ int3();
+    // We often have several deopts to the same entry; reuse the last
+    // jump table entry if this is the case.
+    if (jump_table_.is_empty() ||
+        jump_table_.last().address != entry ||
+        jump_table_.last().needs_frame != !frame_is_built_ ||
+        jump_table_.last().is_lazy_deopt != needs_lazy_deopt) {
+      JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
+      jump_table_.Add(table_entry, zone());
     }
-    if (!lazy_deopt_needed && frame_is_built_) {
-      if (FLAG_trap_on_deopt) {
-        __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
-      } else {
-        __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
-      }
+    if (cc == no_condition) {
+      __ jmp(&jump_table_.last().label);
     } else {
-      // We often have several deopts to the same entry, reuse the last
-      // jump entry if this is the case.
-      if (jump_table_.is_empty() ||
-          jump_table_.last().address != entry ||
-          jump_table_.last().needs_frame != !frame_is_built_ ||
-          jump_table_.last().is_lazy_deopt != lazy_deopt_needed) {
-        JumpTableEntry table_entry(entry, !frame_is_built_, lazy_deopt_needed);
-        jump_table_.Add(table_entry, zone());
-      }
-      if (FLAG_trap_on_deopt) {
-        __ jmp(&jump_table_.last().label);
-      } else {
-        __ j(cc, &jump_table_.last().label);
-      }
+      __ j(cc, &jump_table_.last().label);
     }
-    __ bind(&done);
   }
 }
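
A sketch of the entry-reuse rule that DeoptimizeIf now applies unconditionally: consecutive deopts with the same target, frame requirement, and laziness share one out-of-line jump-table entry, so each deopt site shrinks to a single (conditional) jump. Hypothetical Entry type, not the real JumpTableEntry:

    #include <vector>

    struct Entry { const void* address; bool needs_frame; bool is_lazy_deopt; };

    // Returns the index of the entry this deopt site should jump to.
    static size_t EntryFor(std::vector<Entry>& table, const Entry& e) {
      if (table.empty() ||
          table.back().address != e.address ||
          table.back().needs_frame != e.needs_frame ||
          table.back().is_lazy_deopt != e.is_lazy_deopt) {
        table.push_back(e);  // only grow when the last entry differs
      }
      return table.size() - 1;
    }

    int main() {
      std::vector<Entry> table;
      Entry e{nullptr, true, false};
      size_t first = EntryFor(table, e);
      size_t second = EntryFor(table, e);  // reused, table stays at size 1
      return first == second ? 0 : 1;
    }
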
 
@@ -2076,9 +2070,8 @@
         __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
                factory()->heap_number_map());
         __ j(not_equal, &not_heap_number, Label::kNear);
-        __ fldz();
-        __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
-        __ FCmp();
+        __ xorps(xmm0, xmm0);
+        __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
         __ j(zero, false_label);
         __ jmp(true_label);
         __ bind(&not_heap_number);
@@ -4156,27 +4149,9 @@
 }
 
 
-void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
-                                      HValue* value,
-                                      LOperand* operand) {
-  if (value->representation().IsTagged() && !value->type().IsSmi()) {
-    if (operand->IsRegister()) {
-      __ test(ToRegister(operand), Immediate(kSmiTagMask));
-    } else {
-      __ test(ToOperand(operand), Immediate(kSmiTagMask));
-    }
-    DeoptimizeIf(not_zero, environment);
-  }
-}
-
-
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
-  DeoptIfTaggedButNotSmi(instr->environment(),
-                         instr->hydrogen()->length(),
-                         instr->length());
-  DeoptIfTaggedButNotSmi(instr->environment(),
-                         instr->hydrogen()->index(),
-                         instr->index());
+  if (instr->hydrogen()->skip_check()) return;
+
   if (instr->index()->IsConstantOperand()) {
     int constant_index =
         ToInteger32(LConstantOperand::cast(instr->index()));
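
What skip_check() buys at codegen time, sketched below: once the hydrogen-level analysis proves a check redundant (for instance, a repeated access with the same index and length), DoBoundsCheck emits nothing at all. Hypothetical pass shape, not the actual elimination algorithm:

    struct BoundsCheck {
      int index_id;
      int length_id;
      bool skip_check = false;
    };

    // Mark |later| redundant if |earlier| already checked the same pair.
    static void MaybeSkip(const BoundsCheck& earlier, BoundsCheck* later) {
      if (earlier.index_id == later->index_id &&
          earlier.length_id == later->length_id) {
        later->skip_check = true;  // codegen will emit no compare/deopt
      }
    }
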
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 2834305..ea0c777 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -314,10 +314,6 @@
       LEnvironment* env,
       NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
 
-  void DeoptIfTaggedButNotSmi(LEnvironment* environment,
-                              HValue* value,
-                              LOperand* operand);
-
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to
   // true and false label should be made, to optimize fallthrough.
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 37d8390..a25c154 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1712,6 +1712,11 @@
 }
 
 
+LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
+  return NULL;
+}
+
+
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
   return AssignEnvironment(new(zone()) LBoundsCheck(
       UseRegisterOrConstantAtStart(instr->index()),
@@ -1857,6 +1862,12 @@
 }
 
 
+LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
+  LOperand* value = UseAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
 LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
   // If the target is in new space, we'll emit a global cell compare and so
   // want the value in a register.  If the target gets promoted before we
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 96dc46b..d7ca4a1 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -1066,14 +1066,15 @@
 }
 
 
-void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
-                                                  Register name_reg,
-                                                  Register scratch1,
-                                                  Register scratch2,
-                                                  Register scratch3,
-                                                  Handle<AccessorInfo> callback,
-                                                  Handle<String> name,
-                                                  Label* miss) {
+void StubCompiler::GenerateDictionaryLoadCallback(
+    Register receiver,
+    Register name_reg,
+    Register scratch1,
+    Register scratch2,
+    Register scratch3,
+    Handle<ExecutableAccessorInfo> callback,
+    Handle<String> name,
+    Label* miss) {
   ASSERT(!receiver.is(scratch2));
   ASSERT(!receiver.is(scratch3));
   Register dictionary = scratch1;
@@ -1118,17 +1119,18 @@
 }
 
 
-void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
-                                        Handle<JSObject> holder,
-                                        Register receiver,
-                                        Register name_reg,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Register scratch3,
-                                        Register scratch4,
-                                        Handle<AccessorInfo> callback,
-                                        Handle<String> name,
-                                        Label* miss) {
+void StubCompiler::GenerateLoadCallback(
+    Handle<JSObject> object,
+    Handle<JSObject> holder,
+    Register receiver,
+    Register name_reg,
+    Register scratch1,
+    Register scratch2,
+    Register scratch3,
+    Register scratch4,
+    Handle<ExecutableAccessorInfo> callback,
+    Handle<String> name,
+    Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
@@ -1152,7 +1154,7 @@
   // Push data from AccessorInfo.
   if (isolate()->heap()->InNewSpace(callback->data())) {
     __ mov(scratch1, Immediate(callback));
-    __ push(FieldOperand(scratch1, AccessorInfo::kDataOffset));
+    __ push(FieldOperand(scratch1, ExecutableAccessorInfo::kDataOffset));
   } else {
     __ push(Immediate(Handle<Object>(callback->data())));
   }
@@ -1232,8 +1234,9 @@
     if (lookup->IsField()) {
       compile_followup_inline = true;
     } else if (lookup->type() == CALLBACKS &&
-               lookup->GetCallbackObject()->IsAccessorInfo()) {
-      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+               lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
+      ExecutableAccessorInfo* callback =
+          ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
       compile_followup_inline = callback->getter() != NULL &&
           callback->IsCompatibleReceiver(*object);
     }
@@ -1325,8 +1328,8 @@
       // We found CALLBACKS property in prototype chain of interceptor's
       // holder.
       ASSERT(lookup->type() == CALLBACKS);
-      Handle<AccessorInfo> callback(
-          AccessorInfo::cast(lookup->GetCallbackObject()));
+      Handle<ExecutableAccessorInfo> callback(
+          ExecutableAccessorInfo::cast(lookup->GetCallbackObject()));
       ASSERT(callback->getter() != NULL);
 
       // Tail call to runtime.
@@ -1336,7 +1339,7 @@
       __ push(receiver);
       __ push(holder_reg);
       __ mov(holder_reg, Immediate(callback));
-      __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
+      __ push(FieldOperand(holder_reg, ExecutableAccessorInfo::kDataOffset));
       __ push(Immediate(reinterpret_cast<int>(isolate())));
       __ push(holder_reg);
       __ push(name_reg);
@@ -2656,7 +2659,7 @@
     Handle<String> name,
     Handle<JSObject> receiver,
     Handle<JSObject> holder,
-    Handle<AccessorInfo> callback) {
+    Handle<ExecutableAccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : name
diff --git a/src/ic.cc b/src/ic.cc
index b74b0d9..e88ba7d 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1053,8 +1053,9 @@
       return isolate()->stub_cache()->ComputeLoadNormal();
     case CALLBACKS: {
       Handle<Object> callback(lookup->GetCallbackObject());
-      if (callback->IsAccessorInfo()) {
-        Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(callback);
+      if (callback->IsExecutableAccessorInfo()) {
+        Handle<ExecutableAccessorInfo> info =
+            Handle<ExecutableAccessorInfo>::cast(callback);
         if (v8::ToCData<Address>(info->getter()) == 0) break;
         if (!info->IsCompatibleReceiver(*receiver)) break;
         return isolate()->stub_cache()->ComputeLoadCallback(
@@ -1067,6 +1068,8 @@
         return isolate()->stub_cache()->ComputeLoadViaGetter(
             name, receiver, holder, Handle<JSFunction>::cast(getter));
       }
+      // TODO(dcarney): Handle correctly.
+      if (callback->IsDeclaredAccessorInfo()) break;
       ASSERT(callback->IsForeign());
       // No IC support for old-style native accessors.
       break;
@@ -1282,9 +1285,10 @@
     }
     case CALLBACKS: {
       Handle<Object> callback_object(lookup->GetCallbackObject());
-      if (!callback_object->IsAccessorInfo()) break;
-      Handle<AccessorInfo> callback =
-          Handle<AccessorInfo>::cast(callback_object);
+      // TODO(dcarney): Handle DeclaredAccessorInfo correctly.
+      if (!callback_object->IsExecutableAccessorInfo()) break;
+      Handle<ExecutableAccessorInfo> callback =
+          Handle<ExecutableAccessorInfo>::cast(callback_object);
       if (v8::ToCData<Address>(callback->getter()) == 0) break;
       if (!callback->IsCompatibleReceiver(*receiver)) break;
       return isolate()->stub_cache()->ComputeKeyedLoadCallback(
@@ -1475,8 +1479,9 @@
       return isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
     case CALLBACKS: {
       Handle<Object> callback(lookup->GetCallbackObject());
-      if (callback->IsAccessorInfo()) {
-        Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(callback);
+      if (callback->IsExecutableAccessorInfo()) {
+        Handle<ExecutableAccessorInfo> info =
+            Handle<ExecutableAccessorInfo>::cast(callback);
         if (v8::ToCData<Address>(info->setter()) == 0) break;
         if (!holder->HasFastProperties()) break;
         if (!info->IsCompatibleReceiver(*receiver)) break;
@@ -1491,6 +1496,8 @@
             name, receiver, holder, Handle<JSFunction>::cast(setter),
             strict_mode);
       }
+      // TODO(dcarney): Handle correctly.
+      if (callback->IsDeclaredAccessorInfo()) break;
       ASSERT(callback->IsForeign());
       // No IC support for old-style native accessors.
       break;
@@ -1544,9 +1551,10 @@
   if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
     // Optimistically assume that ICs that haven't reached the MONOMORPHIC state
     // yet will do so and stay there.
+    Handle<Map> monomorphic_map = ComputeTransitionedMap(receiver, stub_kind);
     stub_kind = GetNoTransitionStubKind(stub_kind);
     return isolate()->stub_cache()->ComputeKeyedStoreElement(
-        receiver_map, stub_kind, strict_mode, grow_mode);
+        monomorphic_map, stub_kind, strict_mode, grow_mode);
   }
 
   GetReceiverMapsForStub(Handle<Code>(target()), &target_receiver_maps);
@@ -1633,8 +1641,7 @@
                                                 FAST_HOLEY_DOUBLE_ELEMENTS);
     case STORE_NO_TRANSITION:
     case STORE_AND_GROW_NO_TRANSITION:
-      UNREACHABLE();
-      break;
+      return Handle<Map>(receiver->map());
   }
   return Handle<Map>::null();
 }
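
The KeyedStoreIC change above fixes an ordering bug: the monomorphic map must be derived from the original stub_kind, which may carry a transition, before the kind is normalized to its no-transition variant. A schematic of the corrected ordering with hypothetical types and a made-up "+1" transition:

    enum StubKind { STORE_NO_TRANSITION, STORE_TRANSITION_TO_DOUBLE };

    struct Receiver { int map; };

    static int ComputeTransitionedMap(const Receiver& r, StubKind kind) {
      // Hypothetical: a transitioning store yields a different (+1) map.
      return kind == STORE_TRANSITION_TO_DOUBLE ? r.map + 1 : r.map;
    }

    static StubKind GetNoTransitionStubKind(StubKind) {
      return STORE_NO_TRANSITION;
    }

    static int MonomorphicMap(const Receiver& r, StubKind stub_kind) {
      int map = ComputeTransitionedMap(r, stub_kind);  // read first...
      stub_kind = GetNoTransitionStubKind(stub_kind);  // ...then normalize
      (void)stub_kind;  // the normalized kind selects the stub body only
      return map;  // the stub is keyed on the transitioned map
    }
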
diff --git a/src/isolate.cc b/src/isolate.cc
index c8d9c3a..cb63b2b 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1548,6 +1548,14 @@
 }
 
 
+Isolate::ThreadDataTable::~ThreadDataTable() {
+  // TODO(svenpanne) The assertion below would fire if an embedder does not
+  // cleanly dispose all Isolates before disposing v8, so we are conservative
+  // and leave it out for now.
+  // ASSERT_EQ(NULL, list_);
+}
+
+
 Isolate::PerIsolateThreadData*
     Isolate::ThreadDataTable::Lookup(Isolate* isolate,
                                      ThreadId thread_id) {
@@ -1735,6 +1743,11 @@
 }
 
 
+void Isolate::GlobalTearDown() {
+  delete thread_data_table_;
+}
+
+
 void Isolate::Deinit() {
   if (state_ == INITIALIZED) {
     TRACE_ISOLATE(deinit);
diff --git a/src/isolate.h b/src/isolate.h
index df70ba9..b0142bf 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -470,6 +470,8 @@
   // for legacy API reasons.
   void TearDown();
 
+  static void GlobalTearDown();
+
   bool IsDefaultIsolate() const { return this == default_isolate_; }
 
   // Ensures that process-wide resources and the default isolate have been
diff --git a/src/json-stringifier.h b/src/json-stringifier.h
index cec9844..55f4350 100644
--- a/src/json-stringifier.h
+++ b/src/json-stringifier.h
@@ -206,8 +206,39 @@
     "p\0      q\0      r\0      s\0      "
     "t\0      u\0      v\0      w\0      "
     "x\0      y\0      z\0      {\0      "
-    "|\0      }\0      ~\0      \177\0      ";
-
+    "|\0      }\0      ~\0      \177\0      "
+    "\200\0      \201\0      \202\0      \203\0      "
+    "\204\0      \205\0      \206\0      \207\0      "
+    "\210\0      \211\0      \212\0      \213\0      "
+    "\214\0      \215\0      \216\0      \217\0      "
+    "\220\0      \221\0      \222\0      \223\0      "
+    "\224\0      \225\0      \226\0      \227\0      "
+    "\230\0      \231\0      \232\0      \233\0      "
+    "\234\0      \235\0      \236\0      \237\0      "
+    "\240\0      \241\0      \242\0      \243\0      "
+    "\244\0      \245\0      \246\0      \247\0      "
+    "\250\0      \251\0      \252\0      \253\0      "
+    "\254\0      \255\0      \256\0      \257\0      "
+    "\260\0      \261\0      \262\0      \263\0      "
+    "\264\0      \265\0      \266\0      \267\0      "
+    "\270\0      \271\0      \272\0      \273\0      "
+    "\274\0      \275\0      \276\0      \277\0      "
+    "\300\0      \301\0      \302\0      \303\0      "
+    "\304\0      \305\0      \306\0      \307\0      "
+    "\310\0      \311\0      \312\0      \313\0      "
+    "\314\0      \315\0      \316\0      \317\0      "
+    "\320\0      \321\0      \322\0      \323\0      "
+    "\324\0      \325\0      \326\0      \327\0      "
+    "\330\0      \331\0      \332\0      \333\0      "
+    "\334\0      \335\0      \336\0      \337\0      "
+    "\340\0      \341\0      \342\0      \343\0      "
+    "\344\0      \345\0      \346\0      \347\0      "
+    "\350\0      \351\0      \352\0      \353\0      "
+    "\354\0      \355\0      \356\0      \357\0      "
+    "\360\0      \361\0      \362\0      \363\0      "
+    "\364\0      \365\0      \366\0      \367\0      "
+    "\370\0      \371\0      \372\0      \373\0      "
+    "\374\0      \375\0      \376\0      \377\0      ";
 
 BasicJsonStringifier::BasicJsonStringifier(Isolate* isolate)
     : isolate_(isolate), current_index_(0), is_ascii_(true) {
@@ -698,9 +729,15 @@
 }
 
 
-template <typename Char>
-bool BasicJsonStringifier::DoNotEscape(Char c) {
-  return (c >= 0x80) || (c >= '#' && c <= '~' && c != '\\');
+template <>
+bool BasicJsonStringifier::DoNotEscape(uint8_t c) {
+  return c >= '#' && c <= '~' && c != '\\';
+}
+
+
+template <>
+bool BasicJsonStringifier::DoNotEscape(uint16_t c) {
+  return c >= '#' && c != '\\' && c != 0x7f;
 }
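
The template is split into per-character-width specializations: one-byte characters outside '#'..'~' (including 0x80..0xFF, now covered by the new rows added to the escape table above, whose entries map each byte to itself) are routed through the table, while two-byte characters above '#' are emitted directly except for '\\' and DEL. A standalone restatement of the two predicates:

    #include <cassert>
    #include <cstdint>

    static bool DoNotEscapeOneByte(uint8_t c) {
      return c >= '#' && c <= '~' && c != '\\';
    }

    static bool DoNotEscapeTwoByte(uint16_t c) {
      return c >= '#' && c != '\\' && c != 0x7f;
    }

    int main() {
      assert(!DoNotEscapeOneByte(0xFF));   // looked up in the 256-entry table
      assert(DoNotEscapeTwoByte(0x00E9));  // non-ASCII passes straight through
      assert(!DoNotEscapeTwoByte(0x7F));   // DEL still takes the table path
    }
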
 
 
diff --git a/src/lithium.cc b/src/lithium.cc
index 5967e42..e943cc0 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -174,6 +174,9 @@
 
 void LEnvironment::PrintTo(StringStream* stream) {
   stream->Add("[id=%d|", ast_id().ToInt());
+  if (deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
+    stream->Add("deopt_id=%d|", deoptimization_index());
+  }
   stream->Add("[parameters=%d|", parameter_count());
   stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
   for (int i = 0; i < values_.length(); ++i) {
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 2502ae6..23e7b43 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -1047,10 +1047,11 @@
   // Since we are not in an incremental marking phase we can write pointers
   // to code objects (that are never in new space) without worrying about
   // write barriers.
-  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+  Heap* heap = original->GetHeap();
+  heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
                           "liveedit.cc ReplaceCodeObject");
 
-  ASSERT(!HEAP->InNewSpace(*substitution));
+  ASSERT(!heap->InNewSpace(*substitution));
 
   AssertNoAllocation no_allocations_please;
 
@@ -1059,11 +1060,11 @@
   // Iterate over all roots. Stack frames may have pointer into original code,
   // so temporary replace the pointers with offset numbers
   // in prologue/epilogue.
-  HEAP->IterateRoots(&visitor, VISIT_ALL);
+  heap->IterateRoots(&visitor, VISIT_ALL);
 
   // Now iterate over all pointers of all objects, including code_target
   // implicit pointers.
-  HeapIterator iterator;
+  HeapIterator iterator(heap);
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     obj->Iterate(&visitor);
   }
@@ -1129,7 +1130,7 @@
                                  Visitor* visitor) {
     AssertNoAllocation no_allocations_please;
 
-    HeapIterator iterator;
+    HeapIterator iterator(shared_info->GetHeap());
     for (HeapObject* obj = iterator.next(); obj != NULL;
         obj = iterator.next()) {
       if (obj->IsJSFunction()) {
diff --git a/src/log.cc b/src/log.cc
index 0931f92..8b8376e 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -1394,9 +1394,10 @@
 };
 
 
-static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis,
+static int EnumerateCompiledFunctions(Heap* heap,
+                                      Handle<SharedFunctionInfo>* sfis,
                                       Handle<Code>* code_objects) {
-  HeapIterator iterator;
+  HeapIterator iterator(heap);
   AssertNoAllocation no_alloc;
   int compiled_funcs_count = 0;
 
@@ -1562,9 +1563,10 @@
 
 
 void Logger::LogCodeObjects() {
-  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+  Heap* heap = HEAP;
+  heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
                           "Logger::LogCodeObjects");
-  HeapIterator iterator;
+  HeapIterator iterator(heap);
   AssertNoAllocation no_alloc;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (obj->IsCode()) LogCodeObject(obj);
@@ -1618,13 +1620,14 @@
 
 
 void Logger::LogCompiledFunctions() {
-  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+  Heap* heap = HEAP;
+  heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
                           "Logger::LogCompiledFunctions");
   HandleScope scope;
-  const int compiled_funcs_count = EnumerateCompiledFunctions(NULL, NULL);
+  const int compiled_funcs_count = EnumerateCompiledFunctions(heap, NULL, NULL);
   ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
   ScopedVector< Handle<Code> > code_objects(compiled_funcs_count);
-  EnumerateCompiledFunctions(sfis.start(), code_objects.start());
+  EnumerateCompiledFunctions(heap, sfis.start(), code_objects.start());
 
   // During iteration, there can be heap allocation due to
   // GetScriptLineNumber call.
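
LogCompiledFunctions above uses a classic two-pass enumeration: one counting pass with null output arrays sizes the buffers, a second pass fills them. A standalone illustration of the pattern:

    #include <vector>

    // With |out| == nullptr only counts; otherwise also fills the array.
    static int Enumerate(const std::vector<int>& heap, int* out) {
      int n = 0;
      for (int obj : heap) {
        if (obj % 2 == 0) {  // stand-in for "is a compiled function"
          if (out != nullptr) out[n] = obj;
          n++;
        }
      }
      return n;
    }

    int main() {
      std::vector<int> heap = {1, 2, 3, 4};
      int count = Enumerate(heap, nullptr);  // pass 1: count only
      std::vector<int> funcs(count);
      Enumerate(heap, funcs.data());         // pass 2: fill
    }
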
@@ -1638,13 +1641,14 @@
 
 
 void Logger::LogAccessorCallbacks() {
-  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+  Heap* heap = HEAP;
+  heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
                           "Logger::LogAccessorCallbacks");
-  HeapIterator iterator;
+  HeapIterator iterator(heap);
   AssertNoAllocation no_alloc;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
-    if (!obj->IsAccessorInfo()) continue;
-    AccessorInfo* ai = AccessorInfo::cast(obj);
+    if (!obj->IsExecutableAccessorInfo()) continue;
+    ExecutableAccessorInfo* ai = ExecutableAccessorInfo::cast(obj);
     if (!ai->name()->IsString()) continue;
     String* name = String::cast(ai->name());
     Address getter_entry = v8::ToCData<Address>(ai->getter());
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 869b318..5f22565 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -66,6 +66,7 @@
       marking_parity_(ODD_MARKING_PARITY),
       compacting_(false),
       was_marked_incrementally_(false),
+      sweeping_pending_(false),
       tracer_(NULL),
       migration_slots_buffer_(NULL),
       heap_(NULL),
@@ -287,7 +288,8 @@
           case TYPE_FEEDBACK_INFO_TYPE:
             object->Iterate(this);
             break;
-          case ACCESSOR_INFO_TYPE:
+          case DECLARED_ACCESSOR_INFO_TYPE:
+          case EXECUTABLE_ACCESSOR_INFO_TYPE:
           case BYTE_ARRAY_TYPE:
           case CALL_HANDLER_INFO_TYPE:
           case CODE_TYPE:
@@ -526,7 +528,7 @@
 
 
 void MarkCompactCollector::StartSweeperThreads() {
-  SweeperThread::set_sweeping_pending(true);
+  sweeping_pending_ = true;
   for (int i = 0; i < FLAG_sweeper_threads; i++) {
     heap()->isolate()->sweeper_threads()[i]->StartSweeping();
   }
@@ -534,11 +536,11 @@
 
 
 void MarkCompactCollector::WaitUntilSweepingCompleted() {
-  if (SweeperThread::sweeping_pending()) {
+  if (sweeping_pending_) {
     for (int i = 0; i < FLAG_sweeper_threads; i++) {
       heap()->isolate()->sweeper_threads()[i]->WaitForSweeperThread();
     }
-    SweeperThread::set_sweeping_pending(false);
+    sweeping_pending_ = false;
     StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE));
     StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE));
     heap()->FreeQueuedChunks();
@@ -562,7 +564,7 @@
 
 
 bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
-  return SweeperThread::sweeping_pending();
+  return sweeping_pending_;
 }
 
 
@@ -901,7 +903,7 @@
     StartCompaction(NON_INCREMENTAL_COMPACTION);
   }
 
-  PagedSpaces spaces;
+  PagedSpaces spaces(heap());
   for (PagedSpace* space = spaces.next();
        space != NULL;
        space = spaces.next()) {
@@ -1104,17 +1106,14 @@
 
 
 void CodeFlusher::EvictJSFunctionCandidates() {
-  Object* undefined = isolate_->heap()->undefined_value();
-
   JSFunction* candidate = jsfunction_candidates_head_;
   JSFunction* next_candidate;
   while (candidate != NULL) {
     next_candidate = GetNextCandidate(candidate);
-    ClearNextCandidate(candidate, undefined);
+    EvictCandidate(candidate);
     candidate = next_candidate;
   }
-
-  jsfunction_candidates_head_ = NULL;
+  ASSERT(jsfunction_candidates_head_ == NULL);
 }
 
 
@@ -1123,11 +1122,10 @@
   SharedFunctionInfo* next_candidate;
   while (candidate != NULL) {
     next_candidate = GetNextCandidate(candidate);
-    ClearNextCandidate(candidate);
+    EvictCandidate(candidate);
     candidate = next_candidate;
   }
-
-  shared_function_info_candidates_head_ = NULL;
+  ASSERT(shared_function_info_candidates_head_ == NULL);
 }
 
 
@@ -3725,7 +3723,8 @@
 
 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
-                                      sweeper == LAZY_CONSERVATIVE);
+                                      sweeper == LAZY_CONSERVATIVE ||
+                                      sweeper == PARALLEL_CONSERVATIVE);
   space->ClearStats();
 
   PageIterator it(space);
@@ -3844,9 +3843,9 @@
 #endif
   SweeperType how_to_sweep =
       FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
+  if (AreSweeperThreadsActivated()) how_to_sweep = PARALLEL_CONSERVATIVE;
   if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
   if (sweep_precisely_) how_to_sweep = PRECISE;
-  if (AreSweeperThreadsActivated()) how_to_sweep = PARALLEL_CONSERVATIVE;
   // Noncompacting collections simply sweep the spaces to clear the mark
   // bits and free the nonlive blocks (for old and map spaces).  We sweep
   // the map space last because freeing non-live maps overwrites them and
@@ -3856,6 +3855,14 @@
   SweepSpace(heap()->old_pointer_space(), how_to_sweep);
   SweepSpace(heap()->old_data_space(), how_to_sweep);
 
+  if (how_to_sweep == PARALLEL_CONSERVATIVE) {
+    // TODO(hpayer): fix race with concurrent sweeper
+    StartSweeperThreads();
+    if (FLAG_parallel_sweeping && !FLAG_concurrent_sweeping) {
+      WaitUntilSweepingCompleted();
+    }
+  }
+
   RemoveDeadInvalidatedCode();
   SweepSpace(heap()->code_space(), PRECISE);
 
@@ -3863,15 +3870,6 @@
 
   EvacuateNewSpaceAndCandidates();
 
-  if (AreSweeperThreadsActivated()) {
-    // TODO(hpayer): The starting of the sweeper threads should be after
-    // SweepSpace old data space.
-    StartSweeperThreads();
-    if (FLAG_parallel_sweeping && !FLAG_concurrent_sweeping) {
-      WaitUntilSweepingCompleted();
-    }
-  }
-
   // ClearNonLiveTransitions depends on precise sweeping of map space to
   // detect whether unmarked map became dead in this collection or in one
   // of the previous ones.
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 2613a3e..7a32857 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -740,6 +740,9 @@
 
   bool was_marked_incrementally_;
 
+  // True if concurrent or parallel sweeping is currently in progress.
+  bool sweeping_pending_;
+
   // A pointer to the current stack-allocated GC tracer object during a full
   // collection (NULL before and after).
   GCTracer* tracer_;
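
The flag move is the multi-isolate point of these hunks: sweeping_pending_ now lives on the collector instead of being a static on SweeperThread, so two isolates collecting concurrently no longer share one process-wide boolean. Minimal shape, with the thread start/join elided:

    class MarkCompactCollectorSketch {
     public:
      void StartSweeperThreads() {
        sweeping_pending_ = true;  // was: SweeperThread::set_sweeping_pending
        // ... start one thread per FLAG_sweeper_threads ...
      }
      void WaitUntilSweepingCompleted() {
        if (!sweeping_pending_) return;
        // ... join the sweeper threads, steal back their free memory ...
        sweeping_pending_ = false;
      }
     private:
      bool sweeping_pending_ = false;  // per-collector, hence per-isolate
    };
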
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index 38a7cf0..0499d36 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -91,7 +91,7 @@
 
 int DoubleRegister::NumRegisters() {
   if (CpuFeatures::IsSupported(FPU)) {
-    return FPURegister::kNumRegisters;
+    return FPURegister::kMaxNumRegisters;
   } else {
     return 1;
   }
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index a282bcc..d108edc 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -189,7 +189,7 @@
 
 // Coprocessor register.
 struct FPURegister {
-  static const int kNumRegisters = v8::internal::kNumFPURegisters;
+  static const int kMaxNumRegisters = v8::internal::kNumFPURegisters;
 
   // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
   // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
@@ -200,7 +200,7 @@
   //  f28: 0.0
   //  f30: scratch register.
   static const int kNumReservedRegisters = 2;
-  static const int kMaxNumAllocatableRegisters = kNumRegisters / 2 -
+  static const int kMaxNumAllocatableRegisters = kMaxNumRegisters / 2 -
       kNumReservedRegisters;
 
   inline static int NumRegisters();
@@ -218,7 +218,7 @@
     return r;
   }
 
-  bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegisters ; }
+  bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters ; }
   bool is(FPURegister creg) const { return code_ == creg.code_; }
   FPURegister low() const {
     // Find low reg of a Double-reg pair, which is the reg itself.
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 7d03e91..9f5ff8b 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -50,6 +50,18 @@
 }
 
 
+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { a0, a1 };
+  descriptor->register_param_count_ = 2;
+  descriptor->register_params_ = registers;
+  Address entry =
+      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+  descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
+}
+
+
 #define __ ACCESS_MASM(masm)
 
 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
@@ -7982,7 +7994,12 @@
   bool save_fp_regs = CpuFeatures::IsSupported(FPU);
   CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
   __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
+  int parameter_count_offset =
+      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+  __ lw(a1, MemOperand(fp, parameter_count_offset));
   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+  __ sll(a1, a1, kPointerSizeLog2);
+  __ Addu(sp, sp, a1);
   __ Ret();
 }
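
The two added instructions pop the caller's stack parameters after the stub frame is left: the count loaded from the frame is scaled by the pointer size and added to sp. The arithmetic, restated:

    #include <cassert>

    int main() {
      const int kPointerSizeLog2 = 2;    // 4-byte pointers on mips32
      int caller_stack_param_count = 3;  // lw a1, [fp + parameter_count_offset]
      int sp = 100;
      sp += caller_stack_param_count << kPointerSizeLog2;  // sll + Addu
      assert(sp == 112);
    }
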
 
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index f5caab9..006a34f 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -155,7 +155,7 @@
   // -----------------------------------
   if (mode == TRACK_ALLOCATION_SITE) {
     ASSERT(allocation_site_info_found != NULL);
-    masm->TestJSArrayForAllocationSiteInfo(a2, t0,
+    masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq,
                                            allocation_site_info_found);
   }
 
@@ -188,7 +188,7 @@
   Register scratch = t6;
 
   if (mode == TRACK_ALLOCATION_SITE) {
-    masm->TestJSArrayForAllocationSiteInfo(a2, t0, fail);
+    masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, fail);
   }
 
   // Check for empty arrays, which only require a map transition and no changes
@@ -332,7 +332,7 @@
   Label entry, loop, convert_hole, gc_required, only_change_map;
 
   if (mode == TRACK_ALLOCATION_SITE) {
-    masm->TestJSArrayForAllocationSiteInfo(a2, t0, fail);
+    masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, fail);
   }
 
   // Check for empty arrays, which only require a map transition and no changes
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 9a8cfd0..cead13f 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -459,13 +459,19 @@
   // v  +-------------------------+          +-------------------------|
   //    |   COMPILED_STUB marker  |          |   STUB_FAILURE marker   |
   //    +-------------------------+          +-------------------------+
-  //    |                         |          |     stub parameter 1    |
+  //    |                         |          |   caller args.length_   |
   //    | ...                     |          +-------------------------+
-  //    |                         |          |            ...          |
+  //    |                         |          |  caller args.arguments_ |
   //    |-------------------------|<-sp      +-------------------------+
-  //                                         |     stub parameter n    |
-  //      parameters in registers            +-------------------------+<-sp
-  //       and spilled to stack              s0-s1 = number of parameters
+  //                                         |  caller args pointer    |
+  //                                         +-------------------------+
+  //                                         |  caller stack param 1   |
+  //      parameters in registers            +-------------------------+
+  //       and spilled to stack              |           ....          |
+  //                                         +-------------------------+
+  //                                         |  caller stack param n   |
+  //                                         +-------------------------+<-sp
+  //                                         s0-s1 = number of parameters
   //                                         s2 = failure handler address
   //                                         fp = saved frame
   //                                         cp = JSFunction context
@@ -476,9 +482,15 @@
   CodeStubInterfaceDescriptor* descriptor =
       isolate_->code_stub_interface_descriptor(major_key);
 
+  // The output frame must have room for all pushed register parameters
+  // and the standard stack frame slots.
   int output_frame_size = StandardFrameConstants::kFixedFrameSize +
       kPointerSize * descriptor->register_param_count_;
 
+  // Include space for an Arguments object for the callee and, optionally,
+  // space to pass that object to the stub failure handler.
+  output_frame_size += sizeof(Arguments) + kPointerSize;
+
   FrameDescription* output_frame =
       new(output_frame_size) FrameDescription(output_frame_size, 0);
   ASSERT(frame_index == 0);
@@ -490,12 +502,15 @@
       reinterpret_cast<intptr_t>(notify_failure->entry()));
 
   Code* trampoline = NULL;
-  StubFailureTrampolineStub().FindCodeInCache(&trampoline, isolate_);
+  int extra = descriptor->extra_expression_stack_count_;
+  StubFailureTrampolineStub(extra).FindCodeInCache(&trampoline, isolate_);
   ASSERT(trampoline != NULL);
   output_frame->SetPc(reinterpret_cast<intptr_t>(
       trampoline->instruction_start()));
   unsigned input_frame_size = input_->GetFrameSize();
 
+  intptr_t frame_ptr = input_->GetRegister(fp.code());
+
   // JSFunction continuation
   intptr_t input_frame_offset = input_frame_size - kPointerSize;
   intptr_t output_frame_offset = output_frame_size - kPointerSize;
@@ -521,21 +536,50 @@
       Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
   output_frame->SetFrameSlot(output_frame_offset, value);
 
+  int caller_arg_count = 0;
+  if (descriptor->stack_parameter_count_ != NULL) {
+    caller_arg_count =
+        input_->GetRegister(descriptor->stack_parameter_count_->code());
+  }
+
+  // Build the Arguments object for the caller's parameters and a pointer to it.
+  output_frame_offset -= kPointerSize;
+  value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
+      (caller_arg_count - 1) * kPointerSize;
+  output_frame->SetFrameSlot(output_frame_offset, value);
+  output_frame_offset -= kPointerSize;
+  output_frame->SetFrameSlot(output_frame_offset, caller_arg_count);
+
+  value = frame_ptr - (output_frame_size - output_frame_offset) -
+      StandardFrameConstants::kMarkerOffset;
+  output_frame_offset -= kPointerSize;
+  output_frame->SetFrameSlot(output_frame_offset, value);
+
+  // Copy the register parameters to the failure frame.
   for (int i = 0; i < descriptor->register_param_count_; ++i) {
     output_frame_offset -= kPointerSize;
     DoTranslateCommand(iterator, 0, output_frame_offset);
   }
 
-  value = input_->GetRegister(fp.code());
-  output_frame->SetRegister(fp.code(), value);
-  output_frame->SetFp(value);
+  for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
+    double double_value = input_->GetDoubleRegister(i);
+    output_frame->SetDoubleRegister(i, double_value);
+  }
+
+  output_frame->SetRegister(fp.code(), frame_ptr);
+  output_frame->SetFp(frame_ptr);
 
   ApiFunction function(descriptor->deoptimization_handler_);
   ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
   intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
-  output_frame->SetRegister(s0.code(), descriptor->register_param_count_);
-  output_frame->SetRegister(s1.code(),
-      (descriptor->register_param_count_ - 1) * kPointerSize);
+  int params = descriptor->register_param_count_;
+  if (descriptor->stack_parameter_count_ != NULL) {
+    params++;
+  }
+  output_frame->SetRegister(s0.code(), params);
+  output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize);
   output_frame->SetRegister(s2.code(), handler);
 }
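
The frame slots built above encode a view of the caller's arguments: the arguments_ pointer targets the last caller stack parameter at caller_sp + (argc - 1) * kPointerSize, with the count and a pointer to the whole Arguments object stored alongside it. The pointer arithmetic, restated with an assumed kCallerSPOffset (the real constant lives in StandardFrameConstants):

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t kPointerSize = 4;
      const intptr_t kCallerSPOffset = 2 * kPointerSize;  // assumed value
      intptr_t frame_ptr = 0x1000;
      intptr_t caller_arg_count = 3;
      intptr_t arguments_ = frame_ptr + kCallerSPOffset +
                            (caller_arg_count - 1) * kPointerSize;
      assert(arguments_ == 0x1000 + 8 + 8);  // last caller stack parameter
    }
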
 
@@ -593,7 +637,7 @@
   output_frame->SetFrameSlot(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
   output_frame->SetFp(fp_value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
            fp_value, output_offset, value);
   }
@@ -602,7 +646,7 @@
   output_offset -= kPointerSize;
   value = output_[frame_index - 1]->GetContext();
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
            top_address + output_offset, output_offset, value);
   }
@@ -611,7 +655,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
            top_address + output_offset, output_offset, value);
   }
@@ -620,7 +664,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(construct_stub);
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; code object\n",
            top_address + output_offset, output_offset, value);
   }
@@ -629,7 +673,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
            top_address + output_offset, output_offset, value, height - 1);
   }
@@ -638,7 +682,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(function);
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
            top_address + output_offset, output_offset, value);
   }
@@ -648,7 +692,7 @@
   output_offset -= kPointerSize;
   value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
            top_address + output_offset, output_offset, value);
   }
@@ -672,7 +716,7 @@
   unsigned height = 0;
   unsigned height_in_bytes = height * kPointerSize;
   const char* kind = is_setter_stub_frame ? "setter" : "getter";
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("  translating %s stub => height=%u\n", kind, height_in_bytes);
   }
 
@@ -705,7 +749,7 @@
   output_offset -= kPointerSize;
   intptr_t value = output_[frame_index - 1]->GetPc();
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; caller's pc\n",
            top_address + output_offset, output_offset, value);
@@ -717,7 +761,7 @@
   output_frame->SetFrameSlot(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
   output_frame->SetFp(fp_value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; caller's fp\n",
            fp_value, output_offset, value);
@@ -727,7 +771,7 @@
   output_offset -= kPointerSize;
   value = output_[frame_index - 1]->GetContext();
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; context\n",
            top_address + output_offset, output_offset, value);
@@ -737,7 +781,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; function (%s sentinel)\n",
            top_address + output_offset, output_offset, value, kind);
@@ -751,7 +795,7 @@
   Code* accessor_stub = isolate_->builtins()->builtin(name);
   value = reinterpret_cast<intptr_t>(accessor_stub);
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; code object\n",
            top_address + output_offset, output_offset, value);
@@ -797,7 +841,7 @@
   }
   unsigned height = iterator->Next();
   unsigned height_in_bytes = height * kPointerSize;
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("  translating ");
     function->PrintName();
     PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
@@ -861,7 +905,7 @@
     value = output_[frame_index - 1]->GetPc();
   }
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
            top_address + output_offset, output_offset, value);
   }
@@ -884,7 +928,7 @@
   if (is_topmost) {
     output_frame->SetRegister(fp.code(), fp_value);
   }
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
            fp_value, output_offset, value);
   }
@@ -902,7 +946,7 @@
   output_frame->SetFrameSlot(output_offset, value);
   output_frame->SetContext(value);
   if (is_topmost) output_frame->SetRegister(cp.code(), value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
            top_address + output_offset, output_offset, value);
   }
@@ -915,7 +959,7 @@
   // input frame.
   ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
            top_address + output_offset, output_offset, value);
   }
@@ -1076,11 +1120,11 @@
     }
   }
 
+  int double_regs_offset = FrameDescription::double_registers_offset();
   if (CpuFeatures::IsSupported(FPU)) {
     CpuFeatures::Scope scope(FPU);
     // Copy FPU registers to
     // double_registers_[DoubleRegister::kNumAllocatableRegisters]
-    int double_regs_offset = FrameDescription::double_registers_offset();
     for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
       int dst_offset = i * kDoubleSize + double_regs_offset;
       int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
@@ -1131,16 +1175,16 @@
   // Replace the current (input) frame with the output frames.
   Label outer_push_loop, inner_push_loop,
       outer_loop_header, inner_loop_header;
-  // Outer loop state: a0 = current "FrameDescription** output_",
+  // Outer loop state: t0 = current "FrameDescription** output_",
   // a1 = one past the last FrameDescription**.
   __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
-  __ lw(a0, MemOperand(a0, Deoptimizer::output_offset()));  // a0 is output_.
+  __ lw(t0, MemOperand(a0, Deoptimizer::output_offset()));  // t0 is output_.
   __ sll(a1, a1, kPointerSizeLog2);  // Count to offset.
-  __ addu(a1, a0, a1);  // a1 = one past the last FrameDescription**.
+  __ addu(a1, t0, a1);  // a1 = one past the last FrameDescription**.
   __ jmp(&outer_loop_header);
   __ bind(&outer_push_loop);
   // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
-  __ lw(a2, MemOperand(a0, 0));  // output_[ix]
+  __ lw(a2, MemOperand(t0, 0));  // output_[ix]
   __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
   __ jmp(&inner_loop_header);
   __ bind(&inner_push_loop);
@@ -1151,10 +1195,20 @@
   __ bind(&inner_loop_header);
   __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
 
-  __ Addu(a0, a0, Operand(kPointerSize));
+  __ Addu(t0, t0, Operand(kPointerSize));
   __ bind(&outer_loop_header);
-  __ Branch(&outer_push_loop, lt, a0, Operand(a1));
+  __ Branch(&outer_push_loop, lt, t0, Operand(a1));
 
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+
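+    // Restore the double registers from the input frame description.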
+    __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
+    for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
+      const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+      int src_offset = i * kDoubleSize + double_regs_offset;
+      __ ldc1(fpu_reg, MemOperand(a1, src_offset));
+    }
+  }
 
   // Push state, pc, and continuation from the last output frame.
   if (type() != OSR) {
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
index 2ed358a..188e7d1 100644
--- a/src/mips/frames-mips.h
+++ b/src/mips/frames-mips.h
@@ -193,30 +193,6 @@
 };
 
 
-class StandardFrameConstants : public AllStatic {
- public:
-  // Fixed part of the frame consists of return address, caller fp,
-  // context and function.
-  static const int kFixedFrameSize    =  4 * kPointerSize;
-  static const int kExpressionsOffset = -3 * kPointerSize;
-  static const int kMarkerOffset      = -2 * kPointerSize;
-  static const int kContextOffset     = -1 * kPointerSize;
-  static const int kCallerFPOffset    =  0 * kPointerSize;
-  static const int kCallerPCOffset    = +1 * kPointerSize;
-  static const int kCallerSPOffset    = +2 * kPointerSize;
-
-  // Size of the MIPS 4 32-bit argument slots.
-  // This is just an alias with a shorter name. Use it from now on.
-  static const int kRArgsSlotsSize = 4 * kPointerSize;
-  static const int kRegularArgsSlotsSize = kRArgsSlotsSize;
-
-  // JS argument slots size.
-  static const int kJSArgsSlotsSize = 0 * kPointerSize;
-  // Assembly builtins argument slots size.
-  static const int kBArgsSlotsSize = 0 * kPointerSize;
-};
-
-
 class JavaScriptFrameConstants : public AllStatic {
  public:
   // FP-relative.
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 86669f4..0e4c615 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -147,14 +147,21 @@
 
   info()->set_prologue_offset(masm_->pc_offset());
   if (NeedsEagerFrame()) {
-    // The following three instructions must remain together and unmodified for
-    // code aging to work properly.
-    __ Push(ra, fp, cp, a1);
-    // Add unused load of ip to ensure prologue sequence is identical for
-    // full-codegen and lithium-codegen.
-    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    // Adj. FP to point to saved FP.
-    __ Addu(fp, sp, Operand(2 * kPointerSize));
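+    // Stubs have no JSFunction to push, so they build a frame marked with
+    // a STUB sentinel instead.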
+    if (info()->IsStub()) {
+      __ Push(ra, fp, cp);
+      __ Push(Smi::FromInt(StackFrame::STUB));
+      // Adjust FP to point to saved FP.
+      __ Addu(fp, sp, Operand(2 * kPointerSize));
+    } else {
+      // The following three instructions must remain together and unmodified
+      // for code aging to work properly.
+      __ Push(ra, fp, cp, a1);
+      // Add unused load of ip to ensure prologue sequence is identical for
+      // full-codegen and lithium-codegen.
+      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+      // Adjust FP to point to saved FP.
+      __ Addu(fp, sp, Operand(2 * kPointerSize));
+    }
     frame_is_built_ = true;
   }
 
@@ -162,18 +169,37 @@
   int slots = GetStackSlotCount();
   if (slots > 0) {
     if (FLAG_debug_code) {
-      __ li(a0, Operand(slots));
-      __ li(a2, Operand(kSlotsZapValue));
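+      // Zap the freshly allocated stack slots with a sentinel value,
+      // preserving a0 and a1 around the loop.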
+      __ Subu(sp, sp, Operand(slots * kPointerSize));
+      __ push(a0);
+      __ push(a1);
+      __ Addu(a0, sp, Operand(slots * kPointerSize));
+      __ li(a1, Operand(kSlotsZapValue));
       Label loop;
       __ bind(&loop);
-      __ push(a2);
-      __ Subu(a0, a0, 1);
-      __ Branch(&loop, ne, a0, Operand(zero_reg));
+      __ Subu(a0, a0, Operand(kPointerSize));
+      __ sw(a1, MemOperand(a0, 2 * kPointerSize));
+      __ Branch(&loop, ne, a0, Operand(sp));
+      __ pop(a1);
+      __ pop(a0);
     } else {
       __ Subu(sp, sp, Operand(slots * kPointerSize));
     }
   }
 
+  if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    Comment(";;; Save clobbered callee double registers");
+    int count = 0;
+    BitVector* doubles = chunk()->allocated_double_registers();
+    BitVector::Iterator save_iterator(doubles);
+    while (!save_iterator.Done()) {
+      __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+              MemOperand(sp, count * kDoubleSize));
+      save_iterator.Advance();
+      count++;
+    }
+  }
+
   // Possibly allocate a local context.
   int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
@@ -772,7 +798,7 @@
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
-
+  ASSERT(info()->IsOptimizing() || info()->IsStub());
   Deoptimizer::BailoutType bailout_type = info()->IsStub()
       ? Deoptimizer::LAZY
       : Deoptimizer::EAGER;
@@ -783,7 +809,6 @@
   }
 
   ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on MIPS.
-
   if (FLAG_deopt_every_n_times == 1 && info_->opt_count() == id) {
     __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
     return;
@@ -798,10 +823,14 @@
     __ bind(&skip);
   }
 
-  bool needs_lazy_deopt = info()->IsStub();
   ASSERT(info()->IsStub() || frame_is_built_);
-  if (cc == al && !needs_lazy_deopt) {
-    __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
+  bool needs_lazy_deopt = info()->IsStub();
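+  // A lazy deopt entry must be reached with a call so that a return address
+  // is available; eager deopts can jump to the entry directly.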
+  if (cc == al && frame_is_built_) {
+    if (needs_lazy_deopt) {
+      __ Call(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
+    } else {
+      __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
+    }
   } else {
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
@@ -2461,11 +2490,26 @@
     __ push(v0);
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
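+  // Restore the double registers that were saved in the prologue.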
+  if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    ASSERT(NeedsEagerFrame());
+    BitVector* doubles = chunk()->allocated_double_registers();
+    BitVector::Iterator save_iterator(doubles);
+    int count = 0;
+    while (!save_iterator.Done()) {
+      __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+              MemOperand(sp, count * kDoubleSize));
+      save_iterator.Advance();
+      count++;
+    }
+  }
   if (NeedsEagerFrame()) {
     int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
     __ mov(sp, fp);
     __ Pop(ra, fp);
-    __ Addu(sp, sp, Operand(sp_delta));
+    if (!info()->IsStub()) {
+      __ Addu(sp, sp, Operand(sp_delta));
+    }
   }
   __ Jump(ra);
 }
@@ -3242,8 +3286,14 @@
 
 
 void LCodeGen::DoContext(LContext* instr) {
+  // If there is a non-return use, the context must be moved to a register.
   Register result = ToRegister(instr->result());
-  __ mov(result, cp);
+  for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
+    if (!it.value()->IsReturn()) {
+      __ mov(result, cp);
+      return;
+    }
+  }
 }
 
 
@@ -3947,29 +3997,9 @@
 }
 
 
-void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
-                                      HValue* value,
-                                      LOperand* operand) {
-  if (value->representation().IsTagged() && !value->type().IsSmi()) {
-    if (operand->IsRegister()) {
-      __ And(at, ToRegister(operand), Operand(kSmiTagMask));
-      DeoptimizeIf(ne, environment, at, Operand(zero_reg));
-    } else {
-      __ li(at, ToOperand(operand));
-      __ And(at, at, Operand(kSmiTagMask));
-      DeoptimizeIf(ne, environment, at, Operand(zero_reg));
-    }
-  }
-}
-
-
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
-  DeoptIfTaggedButNotSmi(instr->environment(),
-                         instr->hydrogen()->length(),
-                         instr->length());
-  DeoptIfTaggedButNotSmi(instr->environment(),
-                         instr->hydrogen()->index(),
-                         instr->index());
+  if (instr->hydrogen()->skip_check()) return;
+
   if (instr->index()->IsConstantOperand()) {
     int constant_index =
         ToInteger32(LConstantOperand::cast(instr->index()));
@@ -4190,7 +4220,6 @@
 
 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   Register object_reg = ToRegister(instr->object());
-  Register new_map_reg = ToRegister(instr->new_map_temp());
   Register scratch = scratch0();
 
   Handle<Map> from_map = instr->original_map();
@@ -4198,23 +4227,32 @@
   ElementsKind from_kind = instr->from_kind();
   ElementsKind to_kind = instr->to_kind();
 
-  __ mov(ToRegister(instr->result()), object_reg);
-
   Label not_applicable;
   __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   __ Branch(&not_applicable, ne, scratch, Operand(from_map));
 
-  __ li(new_map_reg, Operand(to_map));
   if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+    Register new_map_reg = ToRegister(instr->new_map_temp());
+    __ li(new_map_reg, Operand(to_map));
     __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
     // Write barrier.
     __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                         scratch, kRAHasBeenSaved, kDontSaveFPRegs);
+  } else if (FLAG_compiled_transitions) {
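+    // Call the compiled transition stub with the object in a0 and the
+    // target map in a1.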
+    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+    __ mov(a0, object_reg);
+    __ li(a1, Operand(to_map));
+    TransitionElementsKindStub stub(from_kind, to_kind);
+    __ CallStub(&stub);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   } else if (IsFastSmiElementsKind(from_kind) &&
              IsFastDoubleElementsKind(to_kind)) {
     Register fixed_object_reg = ToRegister(instr->temp());
     ASSERT(fixed_object_reg.is(a2));
+    Register new_map_reg = ToRegister(instr->new_map_temp());
     ASSERT(new_map_reg.is(a3));
+    __ li(new_map_reg, Operand(to_map));
     __ mov(fixed_object_reg, object_reg);
     CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
              RelocInfo::CODE_TARGET, instr);
@@ -4222,7 +4260,9 @@
              IsFastObjectElementsKind(to_kind)) {
     Register fixed_object_reg = ToRegister(instr->temp());
     ASSERT(fixed_object_reg.is(a2));
+    Register new_map_reg = ToRegister(instr->new_map_temp());
     ASSERT(new_map_reg.is(a3));
+    __ li(new_map_reg, Operand(to_map));
     __ mov(fixed_object_reg, object_reg);
     CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
              RelocInfo::CODE_TARGET, instr);
@@ -4233,6 +4273,16 @@
 }
 
 
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+  Register object = ToRegister(instr->object());
+  Register temp = ToRegister(instr->temp());
+  Label fail;
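+  // With ne, the helper branches to fail when no allocation site info
+  // trails the array; otherwise fall through and deoptimize.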
+  __ TestJSArrayForAllocationSiteInfo(object, temp, ne, &fail);
+  DeoptimizeIf(al, instr->environment());
+  __ bind(&fail);
+}
+
+
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   __ push(ToRegister(instr->left()));
   __ push(ToRegister(instr->right()));
@@ -4569,6 +4619,52 @@
   Register temp1 = ToRegister(instr->temp());
   Register temp2 = ToRegister(instr->temp2());
 
+  bool convert_hole = false;
+  HValue* change_input = instr->hydrogen()->value();
+  if (change_input->IsLoadKeyed()) {
+    HLoadKeyed* load = HLoadKeyed::cast(change_input);
+    convert_hole = load->UsesMustHandleHole();
+  }
+
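+  // If the input may be the hole NaN, materialize the hole value instead of
+  // boxing it in a heap number; any other NaN is canonicalized first.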
+  Label no_special_nan_handling;
+  Label done;
+  if (convert_hole) {
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      DoubleRegister input_reg = ToDoubleRegister(instr->value());
+      __ BranchF(&no_special_nan_handling, NULL, eq, input_reg, input_reg);
+      __ Move(reg, scratch0(), input_reg);
+      Label canonicalize;
+      __ Branch(&canonicalize, ne, scratch0(), Operand(kHoleNanUpper32));
+      __ li(reg, factory()->the_hole_value());
+      __ Branch(&done);
+      __ bind(&canonicalize);
+      __ Move(input_reg,
+              FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+    } else {
+      Label not_hole;
+      __ Branch(&not_hole, ne, sfpd_hi, Operand(kHoleNanUpper32));
+      __ li(reg, factory()->the_hole_value());
+      __ Branch(&done);
+      __ bind(&not_hole);
+      __ And(scratch, sfpd_hi, Operand(0x7ff00000));
+      __ Branch(&no_special_nan_handling, ne, scratch, Operand(0x7ff00000));
+      Label special_nan_handling;
+      __ And(at, sfpd_hi, Operand(0x000FFFFF));
+      __ Branch(&special_nan_handling, ne, at, Operand(zero_reg));
+      __ Branch(&no_special_nan_handling, eq, sfpd_lo, Operand(zero_reg));
+      __ bind(&special_nan_handling);
+      double canonical_nan =
+          FixedDoubleArray::canonical_not_the_hole_nan_as_double();
+      uint64_t casted_nan = BitCast<uint64_t>(canonical_nan);
+      __ li(sfpd_lo,
+            Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF)));
+      __ li(sfpd_hi,
+            Operand(static_cast<uint32_t>(casted_nan >> 32)));
+    }
+  }
+
+  __ bind(&no_special_nan_handling);
   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   if (FLAG_inline_new) {
     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
@@ -4588,6 +4684,7 @@
   }
   // Now that we have finished with the object's real address tag it
   __ Addu(reg, reg, kHeapObjectTag);
+  __ bind(&done);
 }
 
 
@@ -4631,43 +4728,57 @@
                                 DoubleRegister result_reg,
                                 bool deoptimize_on_undefined,
                                 bool deoptimize_on_minus_zero,
-                                LEnvironment* env) {
+                                LEnvironment* env,
+                                NumberUntagDMode mode) {
   Register scratch = scratch0();
   CpuFeatures::Scope scope(FPU);
 
   Label load_smi, heap_number, done;
 
-  // Smi check.
-  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+    // Smi check.
+    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
 
-  // Heap number map check.
-  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  if (deoptimize_on_undefined) {
-    DeoptimizeIf(ne, env, scratch, Operand(at));
-  } else {
-    Label heap_number;
-    __ Branch(&heap_number, eq, scratch, Operand(at));
+    // Heap number map check.
+    __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    if (deoptimize_on_undefined) {
+      DeoptimizeIf(ne, env, scratch, Operand(at));
+    } else {
+      Label heap_number;
+      __ Branch(&heap_number, eq, scratch, Operand(at));
 
-    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    DeoptimizeIf(ne, env, input_reg, Operand(at));
+      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+      DeoptimizeIf(ne, env, input_reg, Operand(at));
 
-    // Convert undefined to NaN.
-    __ LoadRoot(at, Heap::kNanValueRootIndex);
-    __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
+      // Convert undefined to NaN.
+      __ LoadRoot(at, Heap::kNanValueRootIndex);
+      __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
+      __ Branch(&done);
+
+      __ bind(&heap_number);
+    }
+    // Heap number to double register conversion.
+    __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+    if (deoptimize_on_minus_zero) {
+      __ mfc1(at, result_reg.low());
+      __ Branch(&done, ne, at, Operand(zero_reg));
+      __ mfc1(scratch, result_reg.high());
+      DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
+    }
     __ Branch(&done);
-
-    __ bind(&heap_number);
+  } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
+    // Deoptimize when the value is not a smi, i.e. when it is the hole.
+    __ And(at, input_reg, Operand(kSmiTagMask));
+    DeoptimizeIf(ne, env, at, Operand(zero_reg));
+    __ SmiUntag(scratch, input_reg);
+  } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
+    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+    __ Move(result_reg,
+            FixedDoubleArray::hole_nan_as_double());
+    __ Branch(&done);
+  } else {
+    __ SmiUntag(scratch, input_reg);
+    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
   }
-  // Heap number to double register conversion.
-  __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
-  if (deoptimize_on_minus_zero) {
-    __ mfc1(at, result_reg.low());
-    __ Branch(&done, ne, at, Operand(zero_reg));
-    __ mfc1(scratch, result_reg.high());
-    DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
-  }
-  __ Branch(&done);
 
   // Smi to double register conversion
   __ bind(&load_smi);
@@ -4794,10 +4905,28 @@
   Register input_reg = ToRegister(input);
   DoubleRegister result_reg = ToDoubleRegister(result);
 
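+  // Pick the untagging mode from what the value can contain: keyed loads
+  // that must handle the hole get dedicated smi-or-hole modes.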
+  NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
+  HValue* value = instr->hydrogen()->value();
+  if (value->type().IsSmi()) {
+    if (value->IsLoadKeyed()) {
+      HLoadKeyed* load = HLoadKeyed::cast(value);
+      if (load->UsesMustHandleHole()) {
+        if (load->hole_mode() == ALLOW_RETURN_HOLE) {
+          mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
+        } else {
+          mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
+        }
+      } else {
+        mode = NUMBER_CANDIDATE_IS_SMI;
+      }
+    }
+  }
+
   EmitNumberUntagD(input_reg, result_reg,
                    instr->hydrogen()->deoptimize_on_undefined(),
                    instr->hydrogen()->deoptimize_on_minus_zero(),
-                   instr->environment());
+                   instr->environment(),
+                   mode);
 }
 
 
@@ -5091,6 +5220,63 @@
 }
 
 
+void LCodeGen::DoAllocate(LAllocate* instr) {
+  class DeferredAllocate: public LDeferredCode {
+   public:
+    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LAllocate* instr_;
+  };
+
+  DeferredAllocate* deferred =
+      new(zone()) DeferredAllocate(this, instr);
+
+  Register size = ToRegister(instr->size());
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->temp1());
+  Register scratch2 = ToRegister(instr->temp2());
+
+  HAllocate* original_instr = instr->hydrogen();
+  if (original_instr->size()->IsConstant()) {
+    UNREACHABLE();
+  } else {
+    // Allocate memory for the object.
+    AllocationFlags flags = TAG_OBJECT;
+    if (original_instr->MustAllocateDoubleAligned()) {
+      flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+    }
+    __ AllocateInNewSpace(size,
+                          result,
+                          scratch,
+                          scratch2,
+                          deferred->entry(),
+                          flags);
+  }
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+  Register size = ToRegister(instr->size());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, zero_reg);
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ SmiTag(size, size);
+  __ push(size);
+  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
   Handle<FixedArray> literals(instr->environment()->closure()->literals());
   ElementsKind boilerplate_elements_kind =
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index 83bda9a..598a847 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -133,6 +133,7 @@
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
   void DoDeferredAllocateObject(LAllocateObject* instr);
+  void DoDeferredAllocate(LAllocate* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
 
@@ -326,11 +327,8 @@
                         DoubleRegister result,
                         bool deoptimize_on_undefined,
                         bool deoptimize_on_minus_zero,
-                        LEnvironment* env);
-
-  void DeoptIfTaggedButNotSmi(LEnvironment* environment,
-                              HValue* value,
-                              LOperand* operand);
+                        LEnvironment* env,
+                        NumberUntagDMode mode);
 
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index 736890e..863457e 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -1590,6 +1590,11 @@
 }
 
 
+LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
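+  // Numeric constraints only carry range information for the compiler;
+  // no code is generated for them.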
+  return NULL;
+}
+
+
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
   LOperand* value = UseRegisterOrConstantAtStart(instr->index());
   LOperand* length = UseRegister(instr->length());
@@ -1729,6 +1734,12 @@
 }
 
 
+LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
 LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
   return AssignEnvironment(new(zone()) LCheckFunction(value));
@@ -2005,12 +2016,16 @@
 
 LInstruction* LChunkBuilder::DoTransitionElementsKind(
     HTransitionElementsKind* instr) {
+  LOperand* object = UseRegister(instr->object());
   if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
-    LOperand* object = UseRegister(instr->object());
     LOperand* new_map_reg = TempRegister();
     LTransitionElementsKind* result =
         new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
     return DefineSameAsFirst(result);
+  } else if (FLAG_compiled_transitions) {
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, NULL, NULL);
+    return AssignPointerMap(result);
   } else {
     LOperand* object = UseFixed(instr->object(), a0);
     LOperand* fixed_object_reg = FixedTemp(a2);
@@ -2019,11 +2034,21 @@
         new(zone()) LTransitionElementsKind(object,
                                             new_map_reg,
                                             fixed_object_reg);
-    return MarkAsCall(DefineFixed(result, v0), instr);
+    return MarkAsCall(result, instr);
   }
 }
 
 
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+    HTrapAllocationMemento* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* temp = TempRegister();
+  LTrapAllocationMemento* result =
+      new(zone()) LTrapAllocationMemento(object, temp);
+  return AssignEnvironment(result);
+}
+
+
 LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
   bool needs_write_barrier = instr->NeedsWriteBarrier();
   bool needs_write_barrier_for_map = !instr->transition().is_null() &&
@@ -2090,12 +2115,23 @@
 
 
 LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
+  info()->MarkAsDeferredCalling();
   LAllocateObject* result =
       new(zone()) LAllocateObject(TempRegister(), TempRegister());
   return AssignPointerMap(DefineAsRegister(result));
 }
 
 
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* size = UseTempRegister(instr->size());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
 LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
   return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, v0), instr);
 }
@@ -2139,7 +2175,7 @@
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
   LParameter* result = new(zone()) LParameter;
-  if (info()->IsOptimizing()) {
+  if (instr->kind() == HParameter::STACK_PARAMETER) {
     int spill_index = chunk()->GetParameterStackSlot(instr->index());
     return DefineAsSpilled(result, spill_index);
   } else {
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index 45754aa..738a464 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -50,6 +50,7 @@
   V(AccessArgumentsAt)                          \
   V(AddI)                                       \
   V(AllocateObject)                             \
+  V(Allocate)                                   \
   V(ApplyArguments)                             \
   V(ArgumentsElements)                          \
   V(ArgumentsLength)                            \
@@ -173,6 +174,7 @@
   V(Throw)                                      \
   V(ToFastProperties)                           \
   V(TransitionElementsKind)                     \
+  V(TrapAllocationMemento)                      \
   V(Typeof)                                     \
   V(TypeofIsAndBranch)                          \
   V(UnaryMathOperation)                         \
@@ -1583,6 +1585,7 @@
 class LContext: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+  DECLARE_HYDROGEN_ACCESSOR(Context)
 };
 
 
@@ -1816,6 +1819,7 @@
   LOperand* temp2() { return temps_[1]; }
 
   DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
 };
 
 
@@ -2000,10 +2004,10 @@
  public:
   LTransitionElementsKind(LOperand* object,
                           LOperand* new_map_temp,
-                          LOperand* temp) {
+                          LOperand* fixed_object_temp) {
     inputs_[0] = object;
     temps_[0] = new_map_temp;
-    temps_[1] = temp;
+    temps_[1] = fixed_object_temp;
   }
 
   LOperand* object() { return inputs_[0]; }
@@ -2023,6 +2027,22 @@
 };
 
 
+class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
+ public:
+  LTrapAllocationMemento(LOperand* object,
+                         LOperand* temp) {
+    inputs_[0] = object;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+                               "trap-allocation-memento")
+};
+
+
 class LStringAdd: public LTemplateInstruction<1, 2, 0> {
  public:
   LStringAdd(LOperand* left, LOperand* right) {
@@ -2203,7 +2223,7 @@
 };
 
 
-class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
+class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
  public:
   LAllocateObject(LOperand* temp, LOperand* temp2) {
     temps_[0] = temp;
@@ -2218,6 +2238,23 @@
 };
 
 
+class LAllocate: public LTemplateInstruction<1, 2, 2> {
+ public:
+  LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+    inputs_[1] = size;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* size() { return inputs_[1]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
 class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 478e636..067634e 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -4632,16 +4632,17 @@
 
   const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
   if (save_doubles) {
+    CpuFeatures::Scope scope(FPU);
    // The stack must be aligned to 0 modulo 8 for stores with sdc1.
     ASSERT(kDoubleSize == frame_alignment);
     if (frame_alignment > 0) {
       ASSERT(IsPowerOf2(frame_alignment));
       And(sp, sp, Operand(-frame_alignment));  // Align stack.
     }
-    int space = FPURegister::kNumRegisters * kDoubleSize;
+    int space = FPURegister::kMaxNumRegisters * kDoubleSize;
     Subu(sp, sp, Operand(space));
     // Remember: we only need to save every 2nd double FPU value.
-    for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
+    for (int i = 0; i < FPURegister::kMaxNumRegisters; i += 2) {
       FPURegister reg = FPURegister::from_code(i);
       sdc1(reg, MemOperand(sp, i * kDoubleSize));
     }
@@ -4669,9 +4670,10 @@
                                     bool do_return) {
   // Optionally restore all double registers.
   if (save_doubles) {
+    CpuFeatures::Scope scope(FPU);
     // Remember: we only need to restore every 2nd double FPU value.
     lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
-    for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
+    for (int i = 0; i < FPURegister::kMaxNumRegisters; i += 2) {
       FPURegister reg = FPURegister::from_code(i);
      ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
     }
@@ -5448,6 +5450,7 @@
 void MacroAssembler::TestJSArrayForAllocationSiteInfo(
     Register receiver_reg,
     Register scratch_reg,
+    Condition cond,
     Label* allocation_info_present) {
   Label no_info_available;
   ExternalReference new_space_start =
@@ -5461,7 +5464,7 @@
   lw(at, MemOperand(at));
   Branch(&no_info_available, gt, scratch_reg, Operand(at));
   lw(scratch_reg, MemOperand(scratch_reg, -AllocationSiteInfo::kSize));
-  Branch(allocation_info_present, eq, scratch_reg,
+  Branch(allocation_info_present, cond, scratch_reg,
       Operand(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
   bind(&no_info_available);
 }
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 1a37aca..7c5069e 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -1448,6 +1448,7 @@
   // If allocation info is present, jump to allocation_info_present
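+  // The cond argument selects the sense of the test: eq jumps when the
+  // allocation site info map is found, ne jumps when it is absent.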
   void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
                                         Register scratch_reg,
+                                        Condition cond,
                                         Label* allocation_info_present);
 
  private:
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 6046368..8c7a944 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -707,16 +707,34 @@
 
 
 void AccessorInfo::AccessorInfoVerify() {
-  CHECK(IsAccessorInfo());
-  VerifyPointer(getter());
-  VerifyPointer(setter());
   VerifyPointer(name());
-  VerifyPointer(data());
   VerifyPointer(flag());
   VerifyPointer(expected_receiver_type());
 }
 
 
+void ExecutableAccessorInfo::ExecutableAccessorInfoVerify() {
+  CHECK(IsExecutableAccessorInfo());
+  AccessorInfoVerify();
+  VerifyPointer(getter());
+  VerifyPointer(setter());
+  VerifyPointer(data());
+}
+
+
+void DeclaredAccessorDescriptor::DeclaredAccessorDescriptorVerify() {
+  CHECK(IsDeclaredAccessorDescriptor());
+  VerifySmiField(kInternalFieldOffset);
+}
+
+
+void DeclaredAccessorInfo::DeclaredAccessorInfoVerify() {
+  CHECK(IsDeclaredAccessorInfo());
+  AccessorInfoVerify();
+  VerifyPointer(descriptor());
+}
+
+
 void AccessorPair::AccessorPairVerify() {
   CHECK(IsAccessorPair());
   VerifyPointer(getter());
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 18894c1..28c11a6 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -142,6 +142,11 @@
 }
 
 
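+// AccessorInfo has no instance type of its own; it is the union of the
+// two concrete accessor info types.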
+bool Object::IsAccessorInfo() {
+  return IsExecutableAccessorInfo() || IsDeclaredAccessorInfo();
+}
+
+
 bool Object::IsInstanceOf(FunctionTemplateInfo* expected) {
   // There is a constraint on the object; check.
   if (!this->IsJSObject()) return false;
@@ -2447,6 +2452,7 @@
 CAST_ACCESSOR(ExternalDoubleArray)
 CAST_ACCESSOR(ExternalPixelArray)
 CAST_ACCESSOR(Struct)
+CAST_ACCESSOR(AccessorInfo)
 
 
 #define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
@@ -4102,14 +4108,20 @@
 
 ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
 
-ACCESSORS(AccessorInfo, getter, Object, kGetterOffset)
-ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
-ACCESSORS(AccessorInfo, data, Object, kDataOffset)
 ACCESSORS(AccessorInfo, name, Object, kNameOffset)
 ACCESSORS_TO_SMI(AccessorInfo, flag, kFlagOffset)
 ACCESSORS(AccessorInfo, expected_receiver_type, Object,
           kExpectedReceiverTypeOffset)
 
+ACCESSORS(DeclaredAccessorDescriptor, internal_field, Smi, kInternalFieldOffset)
+
+ACCESSORS(DeclaredAccessorInfo, descriptor, DeclaredAccessorDescriptor,
+          kDescriptorOffset)
+
+ACCESSORS(ExecutableAccessorInfo, getter, Object, kGetterOffset)
+ACCESSORS(ExecutableAccessorInfo, setter, Object, kSetterOffset)
+ACCESSORS(ExecutableAccessorInfo, data, Object, kDataOffset)
+
 ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
 ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
 
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index ce23298..0acbf3a 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -874,18 +874,36 @@
 }
 
 
-void AccessorInfo::AccessorInfoPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "AccessorInfo");
+void ExecutableAccessorInfo::ExecutableAccessorInfoPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "ExecutableAccessorInfo");
+  PrintF(out, "\n - name: ");
+  name()->ShortPrint(out);
+  PrintF(out, "\n - flag: ");
+  flag()->ShortPrint(out);
   PrintF(out, "\n - getter: ");
   getter()->ShortPrint(out);
   PrintF(out, "\n - setter: ");
   setter()->ShortPrint(out);
-  PrintF(out, "\n - name: ");
-  name()->ShortPrint(out);
   PrintF(out, "\n - data: ");
   data()->ShortPrint(out);
+}
+
+
+void DeclaredAccessorInfo::DeclaredAccessorInfoPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "DeclaredAccessorInfo");
+  PrintF(out, "\n - name: ");
+  name()->ShortPrint(out);
   PrintF(out, "\n - flag: ");
   flag()->ShortPrint(out);
+  PrintF(out, "\n - descriptor: ");
+  descriptor()->ShortPrint(out);
+}
+
+
+void DeclaredAccessorDescriptor::DeclaredAccessorDescriptorPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "DeclaredAccessorDescriptor");
+  PrintF(out, "\n - internal field: ");
+  internal_field()->ShortPrint(out);
 }
 
 
diff --git a/src/objects.cc b/src/objects.cc
index 4653931..a65a317 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -180,8 +180,8 @@
   }
 
   // api style callbacks.
-  if (structure->IsAccessorInfo()) {
-    AccessorInfo* data = AccessorInfo::cast(structure);
+  if (structure->IsExecutableAccessorInfo()) {
+    ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(structure);
     if (!data->IsCompatibleReceiver(receiver)) {
       Handle<Object> name_handle(name);
       Handle<Object> receiver_handle(receiver);
@@ -227,6 +227,11 @@
     return isolate->heap()->undefined_value();
   }
 
+  // TODO(dcarney): Handle correctly.
+  if (structure->IsDeclaredAccessorInfo()) {
+    return isolate->heap()->undefined_value();
+  }
+
   UNREACHABLE();
   return NULL;
 }
@@ -2003,9 +2008,9 @@
     return *value_handle;
   }
 
-  if (structure->IsAccessorInfo()) {
+  if (structure->IsExecutableAccessorInfo()) {
     // api style callbacks
-    AccessorInfo* data = AccessorInfo::cast(structure);
+    ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(structure);
     if (!data->IsCompatibleReceiver(this)) {
       Handle<Object> name_handle(name);
       Handle<Object> receiver_handle(this);
@@ -2052,6 +2057,11 @@
     }
   }
 
+  // TODO(dcarney): Handle correctly.
+  if (structure->IsDeclaredAccessorInfo()) {
+    return value;
+  }
+
   UNREACHABLE();
   return NULL;
 }
@@ -9733,8 +9743,9 @@
   ASSERT(!structure->IsForeign());
 
   // api style callbacks.
-  if (structure->IsAccessorInfo()) {
-    Handle<AccessorInfo> data(AccessorInfo::cast(structure));
+  if (structure->IsExecutableAccessorInfo()) {
+    Handle<ExecutableAccessorInfo> data(
+        ExecutableAccessorInfo::cast(structure));
     Object* fun_obj = data->getter();
     v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
     if (call_fun == NULL) return isolate->heap()->undefined_value();
@@ -9770,6 +9781,11 @@
     return isolate->heap()->undefined_value();
   }
 
+  if (structure->IsDeclaredAccessorInfo()) {
+    // TODO(dcarney): Handle correctly.
+    return isolate->heap()->undefined_value();
+  }
+
   UNREACHABLE();
   return NULL;
 }
@@ -9793,11 +9809,12 @@
   // callbacks should be phased out.
   ASSERT(!structure->IsForeign());
 
-  if (structure->IsAccessorInfo()) {
+  if (structure->IsExecutableAccessorInfo()) {
     // api style callbacks
     Handle<JSObject> self(this);
     Handle<JSObject> holder_handle(JSObject::cast(holder));
-    Handle<AccessorInfo> data(AccessorInfo::cast(structure));
+    Handle<ExecutableAccessorInfo> data(
+        ExecutableAccessorInfo::cast(structure));
     Object* call_obj = data->setter();
     v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
     if (call_fun == NULL) return value;
@@ -9835,6 +9852,9 @@
     }
   }
 
+  // TODO(dcarney): Handle correctly.
+  if (structure->IsDeclaredAccessorInfo()) return value;
+
   UNREACHABLE();
   return NULL;
 }
@@ -10405,6 +10425,16 @@
          HasDictionaryArgumentsElements() ||
          (attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0);
   Isolate* isolate = GetIsolate();
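+  // Optional tracing of element writes, used to flag out-of-bounds accesses
+  // on external and JS arrays separately.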
+  if (FLAG_trace_external_array_abuse &&
+      IsExternalArrayElementsKind(GetElementsKind())) {
+    CheckArrayAbuse(this, "external elements write", index);
+  }
+  if (FLAG_trace_js_array_abuse &&
+      !IsExternalArrayElementsKind(GetElementsKind())) {
+    if (IsJSArray()) {
+      CheckArrayAbuse(this, "elements write", index, true);
+    }
+  }
   switch (GetElementsKind()) {
     case FAST_SMI_ELEMENTS:
     case FAST_ELEMENTS:
diff --git a/src/objects.h b/src/objects.h
index 1241312..1dc3865 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -111,7 +111,10 @@
 //       - Foreign
 //       - SharedFunctionInfo
 //       - Struct
+//         - DeclaredAccessorDescriptor
 //         - AccessorInfo
+//           - DeclaredAccessorInfo
+//           - ExecutableAccessorInfo
 //         - AccessorPair
 //         - AccessCheckInfo
 //         - InterceptorInfo
@@ -288,7 +291,9 @@
   V(EXTERNAL_PIXEL_ARRAY_TYPE)                                                 \
   V(FILLER_TYPE)                                                               \
                                                                                \
-  V(ACCESSOR_INFO_TYPE)                                                        \
+  V(DECLARED_ACCESSOR_DESCRIPTOR_TYPE)                                         \
+  V(DECLARED_ACCESSOR_INFO_TYPE)                                               \
+  V(EXECUTABLE_ACCESSOR_INFO_TYPE)                                             \
   V(ACCESSOR_PAIR_TYPE)                                                        \
   V(ACCESS_CHECK_INFO_TYPE)                                                    \
   V(INTERCEPTOR_INFO_TYPE)                                                     \
@@ -441,7 +446,11 @@
 // type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
 // manually.
 #define STRUCT_LIST_ALL(V)                                                     \
-  V(ACCESSOR_INFO, AccessorInfo, accessor_info)                                \
+  V(DECLARED_ACCESSOR_DESCRIPTOR,                                              \
+    DeclaredAccessorDescriptor,                                                \
+    declared_accessor_descriptor)                                              \
+  V(DECLARED_ACCESSOR_INFO, DeclaredAccessorInfo, declared_accessor_info)      \
+  V(EXECUTABLE_ACCESSOR_INFO, ExecutableAccessorInfo, executable_accessor_info)\
   V(ACCESSOR_PAIR, AccessorPair, accessor_pair)                                \
   V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info)                     \
   V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info)                       \
@@ -598,7 +607,9 @@
   FILLER_TYPE,  // LAST_DATA_TYPE
 
   // Structs.
-  ACCESSOR_INFO_TYPE,
+  DECLARED_ACCESSOR_DESCRIPTOR_TYPE,
+  DECLARED_ACCESSOR_INFO_TYPE,
+  EXECUTABLE_ACCESSOR_INFO_TYPE,
   ACCESSOR_PAIR_TYPE,
   ACCESS_CHECK_INFO_TYPE,
   INTERCEPTOR_INFO_TYPE,
@@ -926,6 +937,7 @@
 
   inline bool IsFixedArrayBase();
   inline bool IsExternal();
+  inline bool IsAccessorInfo();
 
   // Returns true if this object is an instance of the specified
   // function template.
@@ -8324,20 +8336,8 @@
 };
 
 
-// An accessor must have a getter, but can have no setter.
-//
-// When setting a property, V8 searches accessors in prototypes.
-// If an accessor was found and it does not have a setter,
-// the request is ignored.
-//
-// If the accessor in the prototype has the READ_ONLY property attribute, then
-// a new value is added to the local object when the property is set.
-// This shadows the accessor in the prototype.
 class AccessorInfo: public Struct {
  public:
-  DECL_ACCESSORS(getter, Object)
-  DECL_ACCESSORS(setter, Object)
-  DECL_ACCESSORS(data, Object)
   DECL_ACCESSORS(name, Object)
   DECL_ACCESSORS(flag, Smi)
   DECL_ACCESSORS(expected_receiver_type, Object)
@@ -8360,13 +8360,10 @@
   static inline AccessorInfo* cast(Object* obj);
 
   // Dispatched behavior.
-  DECLARE_PRINTER(AccessorInfo)
   DECLARE_VERIFIER(AccessorInfo)
 
-  static const int kGetterOffset = HeapObject::kHeaderSize;
-  static const int kSetterOffset = kGetterOffset + kPointerSize;
-  static const int kDataOffset = kSetterOffset + kPointerSize;
-  static const int kNameOffset = kDataOffset + kPointerSize;
+  static const int kNameOffset = HeapObject::kHeaderSize;
   static const int kFlagOffset = kNameOffset + kPointerSize;
   static const int kExpectedReceiverTypeOffset = kFlagOffset + kPointerSize;
   static const int kSize = kExpectedReceiverTypeOffset + kPointerSize;
@@ -8382,6 +8379,74 @@
 };
 
 
+class DeclaredAccessorDescriptor: public Struct {
+ public:
+  // TODO(dcarney): Fill out this class.
+  DECL_ACCESSORS(internal_field, Smi)
+
+  static inline DeclaredAccessorDescriptor* cast(Object* obj);
+
+  // Dispatched behavior.
+  DECLARE_PRINTER(DeclaredAccessorDescriptor)
+  DECLARE_VERIFIER(DeclaredAccessorDescriptor)
+
+  static const int kInternalFieldOffset = HeapObject::kHeaderSize;
+  static const int kSize = kInternalFieldOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(DeclaredAccessorDescriptor);
+};
+
+
+class DeclaredAccessorInfo: public AccessorInfo {
+ public:
+  DECL_ACCESSORS(descriptor, DeclaredAccessorDescriptor)
+
+  static inline DeclaredAccessorInfo* cast(Object* obj);
+
+  // Dispatched behavior.
+  DECLARE_PRINTER(DeclaredAccessorInfo)
+  DECLARE_VERIFIER(DeclaredAccessorInfo)
+
+  static const int kDescriptorOffset = AccessorInfo::kSize;
+  static const int kSize = kDescriptorOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(DeclaredAccessorInfo);
+};
+
+
+// An accessor must have a getter, but can have no setter.
+//
+// When setting a property, V8 searches accessors in prototypes.
+// If an accessor was found and it does not have a setter,
+// the request is ignored.
+//
+// If the accessor in the prototype has the READ_ONLY property attribute, then
+// a new value is added to the local object when the property is set.
+// This shadows the accessor in the prototype.
+class ExecutableAccessorInfo: public AccessorInfo {
+ public:
+  DECL_ACCESSORS(getter, Object)
+  DECL_ACCESSORS(setter, Object)
+  DECL_ACCESSORS(data, Object)
+
+  static inline ExecutableAccessorInfo* cast(Object* obj);
+
+  // Dispatched behavior.
+  DECLARE_PRINTER(ExecutableAccessorInfo)
+  DECLARE_VERIFIER(ExecutableAccessorInfo)
+
+  static const int kGetterOffset = AccessorInfo::kSize;
+  static const int kSetterOffset = kGetterOffset + kPointerSize;
+  static const int kDataOffset = kSetterOffset + kPointerSize;
+  static const int kSize = kDataOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExecutableAccessorInfo);
+};
+
+
 // Support for JavaScript accessors: A pair of a getter and a setter. Each
 // accessor can either be
 //   * a pointer to a JavaScript function or proxy: a real accessor
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 0c7dc14..5c763b2 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -1095,7 +1095,7 @@
 template <> struct SnapshotSizeConstants<4> {
   static const int kExpectedHeapGraphEdgeSize = 12;
   static const int kExpectedHeapEntrySize = 24;
-  static const int kExpectedHeapSnapshotsCollectionSize = 96;
+  static const int kExpectedHeapSnapshotsCollectionSize = 100;
   static const int kExpectedHeapSnapshotSize = 136;
   static const size_t kMaxSerializableSnapshotRawSize = 256 * MB;
 };
@@ -1103,7 +1103,7 @@
 template <> struct SnapshotSizeConstants<8> {
   static const int kExpectedHeapGraphEdgeSize = 24;
   static const int kExpectedHeapEntrySize = 32;
-  static const int kExpectedHeapSnapshotsCollectionSize = 144;
+  static const int kExpectedHeapSnapshotsCollectionSize = 152;
   static const int kExpectedHeapSnapshotSize = 168;
   static const uint64_t kMaxSerializableSnapshotRawSize =
       static_cast<uint64_t>(6000) * MB;
@@ -1286,9 +1286,10 @@
     HeapObjectsMap::kGcRootsFirstSubrootId +
     VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep;
 
-HeapObjectsMap::HeapObjectsMap()
+HeapObjectsMap::HeapObjectsMap(Heap* heap)
     : next_id_(kFirstAvailableObjectId),
-      entries_map_(AddressesMatch) {
+      entries_map_(AddressesMatch),
+      heap_(heap) {
   // This dummy element solves a problem with entries_map_.
   // When we do lookup in HashMap we see no difference between two cases:
   // it has an entry with NULL as the value or it has created
@@ -1366,7 +1367,7 @@
 void HeapObjectsMap::UpdateHeapObjectsMap() {
   HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
                           "HeapSnapshotsCollection::UpdateHeapObjectsMap");
-  HeapIterator iterator;
+  HeapIterator iterator(heap_);
   for (HeapObject* obj = iterator.next();
        obj != NULL;
        obj = iterator.next()) {
@@ -1474,10 +1475,11 @@
 }
 
 
-HeapSnapshotsCollection::HeapSnapshotsCollection()
+HeapSnapshotsCollection::HeapSnapshotsCollection(Heap* heap)
     : is_tracking_objects_(false),
       snapshots_uids_(HeapSnapshotsMatch),
-      token_enumerator_(new TokenEnumerator()) {
+      token_enumerator_(new TokenEnumerator()),
+      ids_(heap) {
 }
 
 
@@ -1538,7 +1540,7 @@
                           "HeapSnapshotsCollection::FindHeapObjectById");
   AssertNoAllocation no_allocation;
   HeapObject* object = NULL;
-  HeapIterator iterator(HeapIterator::kFilterUnreachable);
+  HeapIterator iterator(heap(), HeapIterator::kFilterUnreachable);
   // Make sure that object with the given id is still reachable.
   for (HeapObject* obj = iterator.next();
        obj != NULL;
@@ -2428,7 +2430,7 @@
 
 bool V8HeapExplorer::IterateAndExtractReferences(
     SnapshotFillerInterface* filler) {
-  HeapIterator iterator(HeapIterator::kFilterUnreachable);
+  HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
 
   filler_ = filler;
   bool interrupted = false;
@@ -3079,11 +3081,13 @@
 HeapSnapshotGenerator::HeapSnapshotGenerator(
     HeapSnapshot* snapshot,
     v8::ActivityControl* control,
-    v8::HeapProfiler::ObjectNameResolver* resolver)
+    v8::HeapProfiler::ObjectNameResolver* resolver,
+    Heap* heap)
     : snapshot_(snapshot),
       control_(control),
       v8_heap_explorer_(snapshot_, this, resolver),
-      dom_explorer_(snapshot_, this) {
+      dom_explorer_(snapshot_, this),
+      heap_(heap) {
 }
 
 
@@ -3154,7 +3158,7 @@
 
 void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
   if (control_ == NULL) return;
-  HeapIterator iterator(HeapIterator::kFilterUnreachable);
+  HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
   progress_total_ = iterations_count * (
       v8_heap_explorer_.EstimateObjectsCount(&iterator) +
       dom_explorer_.EstimateObjectsCount());
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 086e5c6..43c737e 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -640,7 +640,9 @@
 
 class HeapObjectsMap {
  public:
-  HeapObjectsMap();
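+  // Takes the heap explicitly so iteration does not depend on the
+  // TLS-current isolate.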
+  explicit HeapObjectsMap(Heap* heap);
+
+  Heap* heap() const { return heap_; }
 
   void SnapshotGenerationFinished();
   SnapshotObjectId FindEntry(Address addr);
@@ -699,6 +701,7 @@
   HashMap entries_map_;
   List<EntryInfo> entries_;
   List<TimeInterval> time_intervals_;
+  Heap* heap_;
 
   DISALLOW_COPY_AND_ASSIGN(HeapObjectsMap);
 };
@@ -706,9 +709,11 @@
 
 class HeapSnapshotsCollection {
  public:
-  HeapSnapshotsCollection();
+  explicit HeapSnapshotsCollection(Heap* heap);
   ~HeapSnapshotsCollection();
 
+  Heap* heap() const { return ids_.heap(); }
+
   bool is_tracking_objects() { return is_tracking_objects_; }
   SnapshotObjectId PushHeapObjectsStats(OutputStream* stream) {
     return ids_.PushHeapObjectsStats(stream);
@@ -1025,7 +1030,8 @@
  public:
   HeapSnapshotGenerator(HeapSnapshot* snapshot,
                         v8::ActivityControl* control,
-                        v8::HeapProfiler::ObjectNameResolver* resolver);
+                        v8::HeapProfiler::ObjectNameResolver* resolver,
+                        Heap* heap);
   bool GenerateSnapshot();
 
  private:
@@ -1043,6 +1049,7 @@
   // Used during snapshot generation.
   int progress_counter_;
   int progress_total_;
+  Heap* heap_;
 
   DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator);
 };
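
Note on the profile-generator changes above: they all follow one pattern.
HeapObjectsMap, HeapSnapshotsCollection and HeapSnapshotGenerator now receive
the Heap* they operate on at construction time, and each HeapIterator is then
built from that stored pointer rather than from the current thread's heap. A
minimal sketch of the injection pattern in plain C++, with illustrative names
(DemoHeapWalker is not a V8 class):

    // Take the heap once at construction; later operations reuse the
    // stored pointer, so no lookup of the current isolate is needed.
    class Heap;  // stands in for v8::internal::Heap

    class DemoHeapWalker {
     public:
      explicit DemoHeapWalker(Heap* heap) : heap_(heap) {}
      Heap* heap() const { return heap_; }  // collaborators can reuse it
     private:
      Heap* heap_;
    };

HeapSnapshotsCollection forwards heap() from its embedded HeapObjectsMap the
same way, so one stored pointer serves the whole snapshot machinery.
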
diff --git a/src/runtime.cc b/src/runtime.cc
index f808d10..bc2681b 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -12446,29 +12446,30 @@
 
   // Get the number of referencing objects.
   int count;
-  HeapIterator heap_iterator;
+  Heap* heap = isolate->heap();
+  HeapIterator heap_iterator(heap);
   count = DebugReferencedBy(&heap_iterator,
                             target, instance_filter, max_references,
                             NULL, 0, arguments_function);
 
   // Allocate an array to hold the result.
   Object* object;
-  { MaybeObject* maybe_object = isolate->heap()->AllocateFixedArray(count);
+  { MaybeObject* maybe_object = heap->AllocateFixedArray(count);
     if (!maybe_object->ToObject(&object)) return maybe_object;
   }
   FixedArray* instances = FixedArray::cast(object);
 
   // Fill the referencing objects.
   // AllocateFixedArray above does not make the heap non-iterable.
-  ASSERT(HEAP->IsHeapIterable());
-  HeapIterator heap_iterator2;
+  ASSERT(heap->IsHeapIterable());
+  HeapIterator heap_iterator2(heap);
   count = DebugReferencedBy(&heap_iterator2,
                             target, instance_filter, max_references,
                             instances, count, arguments_function);
 
   // Return result as JS array.
   Object* result;
-  MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
+  MaybeObject* maybe_result = heap->AllocateJSObject(
       isolate->context()->native_context()->array_function());
   if (!maybe_result->ToObject(&result)) return maybe_result;
   return JSArray::cast(result)->SetContent(instances);
@@ -12514,8 +12515,8 @@
   ASSERT(args.length() == 2);
 
   // First perform a full GC in order to avoid dead objects.
-  isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
-                                     "%DebugConstructedBy");
+  Heap* heap = isolate->heap();
+  heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "%DebugConstructedBy");
 
   // Check parameters.
   CONVERT_ARG_CHECKED(JSFunction, constructor, 0);
@@ -12524,7 +12525,7 @@
 
   // Get the number of referencing objects.
   int count;
-  HeapIterator heap_iterator;
+  HeapIterator heap_iterator(heap);
   count = DebugConstructedBy(&heap_iterator,
                              constructor,
                              max_references,
@@ -12533,14 +12534,14 @@
 
   // Allocate an array to hold the result.
   Object* object;
-  { MaybeObject* maybe_object = isolate->heap()->AllocateFixedArray(count);
+  { MaybeObject* maybe_object = heap->AllocateFixedArray(count);
     if (!maybe_object->ToObject(&object)) return maybe_object;
   }
   FixedArray* instances = FixedArray::cast(object);
 
   ASSERT(HEAP->IsHeapIterable());
   // Fill the referencing objects.
-  HeapIterator heap_iterator2;
+  HeapIterator heap_iterator2(heap);
   count = DebugConstructedBy(&heap_iterator2,
                              constructor,
                              max_references,
@@ -12550,7 +12551,7 @@
   // Return result as JS array.
   Object* result;
   { MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
-          isolate->context()->native_context()->array_function());
+      isolate->context()->native_context()->array_function());
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   return JSArray::cast(result)->SetContent(instances);
@@ -12677,19 +12678,20 @@
   Handle<FixedArray> array;
   array = isolate->factory()->NewFixedArray(kBufferSize);
   int number;
+  Heap* heap = isolate->heap();
   {
-    isolate->heap()->EnsureHeapIsIterable();
+    heap->EnsureHeapIsIterable();
     AssertNoAllocation no_allocations;
-    HeapIterator heap_iterator;
+    HeapIterator heap_iterator(heap);
     Script* scr = *script;
     FixedArray* arr = *array;
     number = FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
   }
   if (number > kBufferSize) {
     array = isolate->factory()->NewFixedArray(number);
-    isolate->heap()->EnsureHeapIsIterable();
+    heap->EnsureHeapIsIterable();
     AssertNoAllocation no_allocations;
-    HeapIterator heap_iterator;
+    HeapIterator heap_iterator(heap);
     Script* scr = *script;
     FixedArray* arr = *array;
     FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
@@ -13026,9 +13028,10 @@
   // Scan the heap for Script objects to find the script with the requested
   // script data.
   Handle<Script> script;
-  script_name->GetHeap()->EnsureHeapIsIterable();
+  Heap* heap = script_name->GetHeap();
+  heap->EnsureHeapIsIterable();
   AssertNoAllocation no_allocation_during_heap_iteration;
-  HeapIterator iterator;
+  HeapIterator iterator(heap);
   HeapObject* obj = NULL;
   while (script.is_null() && ((obj = iterator.next()) != NULL)) {
     // If a script is found check if it has the script data requested.
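
The runtime.cc hunks are a mechanical hoist: isolate->heap() is fetched once
into a local Heap* and that local is handed to every HeapIterator and
allocation call in the function, matching the iterator's new constructor. A
self-contained illustration of the shape (plain C++ with stub types, not the
actual runtime functions):

    struct Heap {};
    struct Isolate {
      Heap heap_storage;
      Heap* heap() { return &heap_storage; }
    };
    struct HeapIterator {
      explicit HeapIterator(Heap* heap) : heap_(heap) {}
      Heap* heap_;
    };

    void WalkTwice(Isolate* isolate) {
      Heap* heap = isolate->heap();  // fetch once...
      HeapIterator first(heap);      // ...reuse for the counting pass
      HeapIterator second(heap);     // ...and again for the filling pass
    }
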
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 06b4782..2af6b64 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -162,10 +162,11 @@
 }
 
 
-Handle<Code> StubCache::ComputeLoadCallback(Handle<String> name,
-                                            Handle<JSObject> receiver,
-                                            Handle<JSObject> holder,
-                                            Handle<AccessorInfo> callback) {
+Handle<Code> StubCache::ComputeLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<ExecutableAccessorInfo> callback) {
   ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
   InlineCacheHolderFlag cache_holder =
       IC::GetCodeCacheForObject(*receiver, *holder);
@@ -353,7 +354,7 @@
     Handle<String> name,
     Handle<JSObject> receiver,
     Handle<JSObject> holder,
-    Handle<AccessorInfo> callback) {
+    Handle<ExecutableAccessorInfo> callback) {
   InlineCacheHolderFlag cache_holder =
       IC::GetCodeCacheForObject(*receiver, *holder);
   Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*receiver, cache_holder));
@@ -469,11 +470,12 @@
 }
 
 
-Handle<Code> StubCache::ComputeStoreCallback(Handle<String> name,
-                                             Handle<JSObject> receiver,
-                                             Handle<JSObject> holder,
-                                             Handle<AccessorInfo> callback,
-                                             StrictModeFlag strict_mode) {
+Handle<Code> StubCache::ComputeStoreCallback(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<ExecutableAccessorInfo> callback,
+    StrictModeFlag strict_mode) {
   ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
   Code::Flags flags = Code::ComputeMonomorphicFlags(
       Code::STORE_IC, Code::CALLBACKS, strict_mode);
@@ -1004,7 +1006,7 @@
   ASSERT(args[0]->IsJSObject());
   ASSERT(args[1]->IsJSObject());
   ASSERT(args[3]->IsSmi());
-  AccessorInfo* callback = AccessorInfo::cast(args[4]);
+  ExecutableAccessorInfo* callback = ExecutableAccessorInfo::cast(args[4]);
   Address getter_address = v8::ToCData<Address>(callback->getter());
   v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address);
   ASSERT(fun != NULL);
@@ -1028,7 +1030,7 @@
 
 RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) {
   JSObject* recv = JSObject::cast(args[0]);
-  AccessorInfo* callback = AccessorInfo::cast(args[1]);
+  ExecutableAccessorInfo* callback = ExecutableAccessorInfo::cast(args[1]);
   Address setter_address = v8::ToCData<Address>(callback->setter());
   v8::AccessorSetter fun = FUNCTION_CAST<v8::AccessorSetter>(setter_address);
   ASSERT(fun != NULL);
@@ -1415,7 +1417,7 @@
     Handle<JSObject> object,
     Handle<JSObject> holder,
     Handle<String> name,
-    Handle<AccessorInfo> callback) {
+    Handle<ExecutableAccessorInfo> callback) {
   Label miss;
 
   GenerateNameCheck(name, this->name(), &miss);
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 740a7c8..7f0d563 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -88,7 +88,7 @@
   Handle<Code> ComputeLoadCallback(Handle<String> name,
                                    Handle<JSObject> object,
                                    Handle<JSObject> holder,
-                                   Handle<AccessorInfo> callback);
+                                   Handle<ExecutableAccessorInfo> callback);
 
   Handle<Code> ComputeLoadViaGetter(Handle<String> name,
                                     Handle<JSObject> object,
@@ -119,10 +119,11 @@
                                      Handle<JSObject> holder,
                                      PropertyIndex field_index);
 
-  Handle<Code> ComputeKeyedLoadCallback(Handle<String> name,
-                                        Handle<JSObject> object,
-                                        Handle<JSObject> holder,
-                                        Handle<AccessorInfo> callback);
+  Handle<Code> ComputeKeyedLoadCallback(
+      Handle<String> name,
+      Handle<JSObject> object,
+      Handle<JSObject> holder,
+      Handle<ExecutableAccessorInfo> callback);
 
   Handle<Code> ComputeKeyedLoadConstant(Handle<String> name,
                                         Handle<JSObject> object,
@@ -151,7 +152,7 @@
   Handle<Code> ComputeStoreCallback(Handle<String> name,
                                     Handle<JSObject> object,
                                     Handle<JSObject> holder,
-                                    Handle<AccessorInfo> callback,
+                                    Handle<ExecutableAccessorInfo> callback,
                                     StrictModeFlag strict_mode);
 
   Handle<Code> ComputeStoreViaSetter(Handle<String> name,
@@ -553,7 +554,7 @@
                             Register scratch2,
                             Register scratch3,
                             Register scratch4,
-                            Handle<AccessorInfo> callback,
+                            Handle<ExecutableAccessorInfo> callback,
                             Handle<String> name,
                             Label* miss);
 
@@ -562,7 +563,7 @@
                                       Register scratch1,
                                       Register scratch2,
                                       Register scratch3,
-                                      Handle<AccessorInfo> callback,
+                                      Handle<ExecutableAccessorInfo> callback,
                                       Handle<String> name,
                                       Label* miss);
 
@@ -616,7 +617,7 @@
   Handle<Code> CompileLoadCallback(Handle<JSObject> object,
                                    Handle<JSObject> holder,
                                    Handle<String> name,
-                                   Handle<AccessorInfo> callback);
+                                   Handle<ExecutableAccessorInfo> callback);
 
   Handle<Code> CompileLoadConstant(Handle<JSObject> object,
                                    Handle<JSObject> holder,
@@ -720,7 +721,7 @@
   Handle<Code> CompileStoreCallback(Handle<String> name,
                                     Handle<JSObject> object,
                                     Handle<JSObject> holder,
-                                    Handle<AccessorInfo> callback);
+                                    Handle<ExecutableAccessorInfo> callback);
 
   static void GenerateStoreViaSetter(MacroAssembler* masm,
                                      Handle<JSFunction> setter);
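
Throughout stub-cache.cc/.h the callback parameters narrow from
Handle<AccessorInfo> to Handle<ExecutableAccessorInfo>, and the runtime
helpers cast with ExecutableAccessorInfo::cast before reading
getter()/setter(), so only descriptors that carry executable C++ callbacks
reach the IC stubs. A standalone sketch of the tag-check-then-cast idiom
(the Demo* types are illustrative, not V8's object model):

    // Narrow from the base descriptor to the executable variant only
    // after checking its tag, mirroring the Is*/cast pattern above.
    struct DemoAccessorInfo { bool is_executable; };
    struct DemoExecutableAccessorInfo : DemoAccessorInfo {
      void* getter;  // raw callback address, as with v8::ToCData<Address>
    };

    DemoExecutableAccessorInfo* AsExecutable(DemoAccessorInfo* info) {
      if (!info->is_executable) return 0;  // not the executable variant
      return static_cast<DemoExecutableAccessorInfo*>(info);
    }
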
diff --git a/src/sweeper-thread.cc b/src/sweeper-thread.cc
index 7e31e6c..df5fcc6 100644
--- a/src/sweeper-thread.cc
+++ b/src/sweeper-thread.cc
@@ -35,8 +35,10 @@
 namespace v8 {
 namespace internal {
 
+static const int kSweeperThreadStackSize = 64 * KB;
+
 SweeperThread::SweeperThread(Isolate* isolate)
-     : Thread("SweeperThread"),
+     : Thread(Thread::Options("v8:SweeperThread", kSweeperThreadStackSize)),
        isolate_(isolate),
        heap_(isolate->heap()),
        collector_(heap_->mark_compact_collector()),
@@ -52,9 +54,6 @@
 }
 
 
-bool SweeperThread::sweeping_pending_ = false;
-
-
 void SweeperThread::Run() {
   Isolate::SetIsolateThreadLocals(isolate_, NULL);
   while (true) {
@@ -75,6 +74,7 @@
   }
 }
 
+
 intptr_t SweeperThread::StealMemory(PagedSpace* space) {
   intptr_t free_bytes = 0;
   if (space->identity() == OLD_POINTER_SPACE) {
diff --git a/src/sweeper-thread.h b/src/sweeper-thread.h
index ba793c2..a170982 100644
--- a/src/sweeper-thread.h
+++ b/src/sweeper-thread.h
@@ -50,11 +50,6 @@
   void WaitForSweeperThread();
   intptr_t StealMemory(PagedSpace* space);
 
-  static bool sweeping_pending() { return sweeping_pending_; }
-  static void set_sweeping_pending(bool sweeping_pending) {
-    sweeping_pending_ = sweeping_pending;
-  }
-
   ~SweeperThread() {
     delete start_sweeping_semaphore_;
     delete end_sweeping_semaphore_;
@@ -73,7 +68,6 @@
   FreeList private_free_list_old_data_space_;
   FreeList private_free_list_old_pointer_space_;
   volatile AtomicWord stop_thread_;
-  static bool sweeping_pending_;
 };
 
 } }  // namespace v8::internal
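
Two things change for the sweeper threads: they are now created through
Thread::Options with an explicit name ("v8:SweeperThread") and a fixed 64 KB
stack, and the process-wide static sweeping_pending_ flag plus its accessors
are deleted, so sweeping status is no longer global to the process. A
self-contained sketch of the Options-based construction (the Thread stub and
DemoThread are illustrative stand-ins, not V8's platform classes):

    // Give a worker thread a debugger-visible name and a bounded stack
    // instead of the platform default.
    class Thread {
     public:
      struct Options {
        Options(const char* name, int stack_size)
            : name(name), stack_size(stack_size) {}
        const char* name;
        int stack_size;
      };
      explicit Thread(const Options&) {}
      virtual void Run() = 0;
      virtual ~Thread() {}
    };

    static const int KB = 1024;  // mirrors V8's globals constant
    static const int kDemoThreadStackSize = 64 * KB;

    class DemoThread : public Thread {
     public:
      DemoThread()
          : Thread(Options("v8:DemoThread", kDemoThreadStackSize)) {}
      virtual void Run() { /* worker loop */ }
    };
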
diff --git a/src/token.h b/src/token.h
index 863ba62..4078a15 100644
--- a/src/token.h
+++ b/src/token.h
@@ -230,26 +230,30 @@
       case EQ: return NE;
       case NE: return EQ;
       case EQ_STRICT: return NE_STRICT;
+      case NE_STRICT: return EQ_STRICT;
       case LT: return GTE;
       case GT: return LTE;
       case LTE: return GT;
       case GTE: return LT;
       default:
+        UNREACHABLE();
         return op;
     }
   }
 
-  static Value InvertCompareOp(Value op) {
+  static Value ReverseCompareOp(Value op) {
     ASSERT(IsCompareOp(op));
     switch (op) {
-      case EQ: return NE;
-      case NE: return EQ;
-      case EQ_STRICT: return NE_STRICT;
+      case EQ: return EQ;
+      case NE: return NE;
+      case EQ_STRICT: return EQ_STRICT;
+      case NE_STRICT: return NE_STRICT;
       case LT: return GT;
       case GT: return LT;
       case LTE: return GTE;
       case GTE: return LTE;
       default:
+        UNREACHABLE();
         return op;
     }
   }
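
The token.h hunk fixes two related tables. NegateCompareOp gains the missing
NE_STRICT case plus an UNREACHABLE() guard so unhandled tokens fail fast in
debug builds, and the old InvertCompareOp is renamed ReverseCompareOp with
corrected entries: negation flips the truth value of a comparison, while
reversal swaps its operands, which is why the equality tokens now map to
themselves under reversal. A standalone check of the distinction (plain C++,
not V8 code):

    #include <cassert>

    int main() {
      int a = 1, b = 2;
      assert((!(a < b)) == (a >= b));  // negate:  LT -> GTE
      assert((a < b) == (b > a));      // reverse: LT -> GT
      assert((a == b) == (b == a));    // reverse: EQ -> EQ (symmetric)
      return 0;
    }
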
diff --git a/src/v8.cc b/src/v8.cc
index 6c86d8e..1753650 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -115,6 +115,7 @@
   LOperand::TearDownCaches();
   ExternalReference::TearDownMathExpData();
   RegisteredExtension::UnregisterAll();
+  Isolate::GlobalTearDown();
 
   is_running_ = false;
   has_been_disposed_ = true;
diff --git a/src/version.cc b/src/version.cc
index 5286f43..c8c0617 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,9 +33,9 @@
 // NOTE these macros are used by the SCons build script so their names
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
-#define MINOR_VERSION     16
-#define BUILD_NUMBER      14
-#define PATCH_LEVEL       1
+#define MINOR_VERSION     17
+#define BUILD_NUMBER      0
+#define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index b3c4bc8..0be9626 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -719,10 +719,21 @@
     return;
   }
 
+  ASSERT(FLAG_deopt_every_n_times == 0);  // Not yet implemented on x64.
+
+  if (FLAG_trap_on_deopt) {
+    Label done;
+    if (cc != no_condition) {
+      __ j(NegateCondition(cc), &done, Label::kNear);
+    }
+    __ int3();
+    __ bind(&done);
+  }
+
   ASSERT(info()->IsStub() || frame_is_built_);
-  bool lazy_deopt = info()->IsStub();
-  if (cc == no_condition) {
-    if (lazy_deopt) {
+  bool needs_lazy_deopt = info()->IsStub();
+  if (cc == no_condition && frame_is_built_) {
+    if (needs_lazy_deopt) {
       __ Call(entry, RelocInfo::RUNTIME_ENTRY);
     } else {
       __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
@@ -733,11 +744,15 @@
     if (jump_table_.is_empty() ||
         jump_table_.last().address != entry ||
         jump_table_.last().needs_frame != !frame_is_built_ ||
-        jump_table_.last().is_lazy_deopt != lazy_deopt) {
-      JumpTableEntry table_entry(entry, !frame_is_built_, lazy_deopt);
+        jump_table_.last().is_lazy_deopt != needs_lazy_deopt) {
+      JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
       jump_table_.Add(table_entry, zone());
     }
-    __ j(cc, &jump_table_.last().label);
+    if (cc == no_condition) {
+      __ jmp(&jump_table_.last().label);
+    } else {
+      __ j(cc, &jump_table_.last().label);
+    }
   }
 }
 
@@ -3916,28 +3931,9 @@
 }
 
 
-void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
-                                      HValue* value,
-                                      LOperand* operand) {
-  if (value->representation().IsTagged() && !value->type().IsSmi()) {
-    Condition cc;
-    if (operand->IsRegister()) {
-      cc = masm()->CheckSmi(ToRegister(operand));
-    } else {
-      cc = masm()->CheckSmi(ToOperand(operand));
-    }
-    DeoptimizeIf(NegateCondition(cc), environment);
-  }
-}
-
-
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
-  DeoptIfTaggedButNotSmi(instr->environment(),
-                         instr->hydrogen()->length(),
-                         instr->length());
-  DeoptIfTaggedButNotSmi(instr->environment(),
-                         instr->hydrogen()->index(),
-                         instr->index());
+  if (instr->hydrogen()->skip_check()) return;
+
   if (instr->length()->IsRegister()) {
     Register reg = ToRegister(instr->length());
     if (!instr->hydrogen()->length()->representation().IsTagged()) {
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 8c6b5d2..b0aa2c2 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -291,11 +291,6 @@
       LEnvironment* env,
       NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
 
-
-  void DeoptIfTaggedButNotSmi(LEnvironment* environment,
-                              HValue* value,
-                              LOperand* operand);
-
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to
   // true and false label should be made, to optimize fallthrough.
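
The x64 code-generator hunks rework DeoptimizeIf: an ASSERT records that
--deopt-every-n-times is not yet implemented on x64, --trap-on-deopt emits an
int3 guarded by the negated condition, the direct Jump/Call fast path is taken
only when there is no condition and the frame is already built, and the shared
jump table gains an unconditional jmp for the no-condition case. Separately,
DoBoundsCheck now consults the hydrogen instruction's skip_check() instead of
the removed DeoptIfTaggedButNotSmi helper. A standalone sketch of the new
dispatch (plain C++; the Emit* stubs stand in for the macro-assembler calls):

    #include <cstdio>

    static void DirectJumpToEntry() { std::puts("Jump/Call entry"); }
    static void CondJumpToTable()   { std::puts("j(cc, &table.last().label)"); }
    static void UncondJumpToTable() { std::puts("jmp(&table.last().label)"); }

    // Mirrors the branch structure of DeoptimizeIf after this patch.
    void EmitDeopt(bool has_condition, bool frame_is_built) {
      if (!has_condition && frame_is_built) {
        DirectJumpToEntry();    // frame exists: deopt entry runs directly
      } else if (has_condition) {
        CondJumpToTable();      // conditional branch into the jump table
      } else {
        UncondJumpToTable();    // new case: nothing to test, plain jmp
      }
    }
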
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index e35a381..4194ad6 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1636,6 +1636,11 @@
 }
 
 
+LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
+  return NULL;
+}
+
+
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
   LOperand* value = UseRegisterOrConstantAtStart(instr->index());
   LOperand* length = Use(instr->length());
@@ -1766,6 +1771,12 @@
 }
 
 
+LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
 LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
   return AssignEnvironment(new(zone()) LCheckFunction(value));
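
Two new lowering hooks land on x64: LChunkBuilder::DoNumericConstraint returns
NULL, meaning the hydrogen node generates no lithium code on this platform,
while DoCheckSmiOrInt32 reuses the existing LCheckSmi instruction wrapped in
AssignEnvironment so the check can deoptimize. A standalone illustration of
those two outcomes (plain C++; the Demo type is not V8's lithium class):

    // A NULL result means "nothing to emit"; a non-NULL result with an
    // environment is a real check that can bail out to unoptimized code.
    struct DemoLInstruction { bool can_deopt; };

    DemoLInstruction* LowerNumericConstraint() {
      return 0;  // analysis-only node, no code generated
    }

    DemoLInstruction* LowerCheckSmiOrInt32(DemoLInstruction* check) {
      check->can_deopt = true;  // stands in for AssignEnvironment(...)
      return check;
    }
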
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 91d13ba..2b4325d 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -1043,14 +1043,15 @@
 }
 
 
-void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
-                                                  Register name_reg,
-                                                  Register scratch1,
-                                                  Register scratch2,
-                                                  Register scratch3,
-                                                  Handle<AccessorInfo> callback,
-                                                  Handle<String> name,
-                                                  Label* miss) {
+void StubCompiler::GenerateDictionaryLoadCallback(
+    Register receiver,
+    Register name_reg,
+    Register scratch1,
+    Register scratch2,
+    Register scratch3,
+    Handle<ExecutableAccessorInfo> callback,
+    Handle<String> name,
+    Label* miss) {
   ASSERT(!receiver.is(scratch1));
   ASSERT(!receiver.is(scratch2));
   ASSERT(!receiver.is(scratch3));
@@ -1094,7 +1095,7 @@
                                         Register scratch2,
                                         Register scratch3,
                                         Register scratch4,
-                                        Handle<AccessorInfo> callback,
+                                        Handle<ExecutableAccessorInfo> callback,
                                         Handle<String> name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
@@ -1117,7 +1118,7 @@
   __ push(reg);  // holder
   if (heap()->InNewSpace(callback->data())) {
     __ Move(scratch1, callback);
-    __ push(FieldOperand(scratch1, AccessorInfo::kDataOffset));  // data
+    __ push(FieldOperand(scratch1, ExecutableAccessorInfo::kDataOffset));
   } else {
     __ Push(Handle<Object>(callback->data()));
   }
@@ -1209,8 +1210,9 @@
     if (lookup->IsField()) {
       compile_followup_inline = true;
     } else if (lookup->type() == CALLBACKS &&
-               lookup->GetCallbackObject()->IsAccessorInfo()) {
-      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+               lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
+      ExecutableAccessorInfo* callback =
+          ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
       compile_followup_inline = callback->getter() != NULL &&
           callback->IsCompatibleReceiver(*object);
     }
@@ -1295,8 +1297,8 @@
       // We found CALLBACKS property in prototype chain of interceptor's
       // holder.
       ASSERT(lookup->type() == CALLBACKS);
-      Handle<AccessorInfo> callback(
-          AccessorInfo::cast(lookup->GetCallbackObject()));
+      Handle<ExecutableAccessorInfo> callback(
+          ExecutableAccessorInfo::cast(lookup->GetCallbackObject()));
       ASSERT(callback->getter() != NULL);
 
       // Tail call to runtime.
@@ -1306,7 +1308,7 @@
       __ push(receiver);
       __ push(holder_reg);
       __ Move(holder_reg, callback);
-      __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
+      __ push(FieldOperand(holder_reg, ExecutableAccessorInfo::kDataOffset));
       __ PushAddress(ExternalReference::isolate_address());
       __ push(holder_reg);
       __ push(name_reg);
@@ -2482,7 +2484,7 @@
     Handle<String> name,
     Handle<JSObject> receiver,
     Handle<JSObject> holder,
-    Handle<AccessorInfo> callback) {
+    Handle<ExecutableAccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : name