Version 3.20.0

Migrated several tests from Blink to the V8 repository.

Allowed users of the V8 API to distinguish between unset and undefined HiddenValues (issue 2746).
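
A minimal sketch of the distinction from the embedder's side (not part of
the commit; names are illustrative, and it assumes an entered Context):

    v8::Local<v8::Object> obj = v8::Object::New();
    v8::Local<v8::String> key = v8::String::New("my-key");

    // Key never set: GetHiddenValue() now returns an empty handle.
    v8::Local<v8::Value> unset = obj->GetHiddenValue(key);
    // unset.IsEmpty() == true

    // Key explicitly set to undefined: the returned handle is non-empty
    // and holds the undefined value.
    obj->SetHiddenValue(key, v8::Undefined());
    v8::Local<v8::Value> set = obj->GetHiddenValue(key);
    // set.IsEmpty() == false, set->IsUndefined() == true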

Deprecated old-style callbacks in the V8 API.
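
For reference, the two callback styles look roughly like this (a sketch;
OldAdd and NewAdd are hypothetical names):

    // Old style, now deprecated: the result is returned directly.
    static v8::Handle<v8::Value> OldAdd(const v8::Arguments& args) {
      return v8::Integer::New(args[0]->Int32Value() +
                              args[1]->Int32Value());
    }

    // New style: void return; the result goes through GetReturnValue().
    static void NewAdd(const v8::FunctionCallbackInfo<v8::Value>& info) {
      info.GetReturnValue().Set(v8::Integer::New(
          info[0]->Int32Value() + info[1]->Int32Value()));
    }

Both forms are still accepted by FunctionTemplate::New() in this release;
the diff below routes the old InvocationCallback form through a temporary
CallHandlerHelper until it can be removed.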

Turned on parallel recompilation.

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@15386 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
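
The api.cc changes below also make function entry hooks per-isolate. A
usage sketch (the hook body is illustrative); the hook must be installed
before the isolate is initialized and cannot be changed or unset later:

    static void EntryHook(uintptr_t function,
                          uintptr_t return_addr_location) {
      // Called on entry to every function generated by V8.
    }

    v8::Isolate* isolate = v8::Isolate::GetCurrent();
    if (!v8::V8::SetFunctionEntryHook(isolate, EntryHook)) {
      // Too late: the isolate is already initialized, or a hook
      // is already set.
    }
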
diff --git a/src/api.cc b/src/api.cc
index 9b1d01c..edffca1 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -300,8 +300,13 @@
 // --- S t a t i c s ---
 
 
-static bool InitializeHelper() {
-  if (i::Snapshot::Initialize()) return true;
+static bool InitializeHelper(i::Isolate* isolate) {
+  // If the isolate has a function entry hook, it needs to re-build all its
+  // code stubs with entry hooks embedded, so don't deserialize a snapshot.
+  if (isolate == NULL || isolate->function_entry_hook() == NULL) {
+    if (i::Snapshot::Initialize())
+      return true;
+  }
   return i::V8::Initialize(NULL);
 }
 
@@ -313,7 +318,7 @@
     if (isolate->IsInitialized()) return true;
   }
   ASSERT(isolate == i::Isolate::Current());
-  return ApiCheck(InitializeHelper(), location, "Error initializing V8");
+  return ApiCheck(InitializeHelper(isolate), location, "Error initializing V8");
 }
 
 // Some initializing API functions are called early and may be
@@ -961,6 +966,22 @@
 }
 
 
+// TODO(dcarney): Remove this abstraction when old callbacks are removed.
+class CallHandlerHelper {
+ public:
+  static inline void Set(Local<FunctionTemplate> function_template,
+                         InvocationCallback callback,
+                         v8::Handle<Value> data) {
+    function_template->SetCallHandlerInternal(callback, data);
+  }
+  static inline void Set(Local<FunctionTemplate> function_template,
+                         FunctionCallback callback,
+                         v8::Handle<Value> data) {
+    function_template->SetCallHandler(callback, data);
+  }
+};
+
+
 template<typename Callback>
 static Local<FunctionTemplate> FunctionTemplateNew(
     Callback callback,
@@ -981,7 +1002,7 @@
   obj->set_serial_number(i::Smi::FromInt(next_serial_number));
   if (callback != 0) {
     if (data.IsEmpty()) data = v8::Undefined();
-    Utils::ToLocal(obj)->SetCallHandler(callback, data);
+    CallHandlerHelper::Set(Utils::ToLocal(obj), callback, data);
   }
   obj->set_length(length);
   obj->set_undetectable(false);
@@ -1225,6 +1246,11 @@
   FunctionTemplateSetCallHandler(this, callback, data);
 }
 
+void FunctionTemplate::SetCallHandlerInternal(InvocationCallback callback,
+                                              v8::Handle<Value> data) {
+  FunctionTemplateSetCallHandler(this, callback, data);
+}
+
 void FunctionTemplate::SetCallHandler(FunctionCallback callback,
                                       v8::Handle<Value> data) {
   FunctionTemplateSetCallHandler(this, callback, data);
@@ -2005,6 +2031,19 @@
 }
 
 
+int Script::GetId() {
+  i::Isolate* isolate = i::Isolate::Current();
+  ON_BAILOUT(isolate, "v8::Script::Id()", return -1);
+  LOG_API(isolate, "Script::Id");
+  {
+    i::HandleScope scope(isolate);
+    i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
+    i::Handle<i::Script> script(i::Script::cast(function_info->script()));
+    return script->id()->value();
+  }
+}
+
+
 int Script::GetLineNumber(int code_pos) {
   i::Isolate* isolate = i::Isolate::Current();
   ON_BAILOUT(isolate, "v8::Script::GetLineNumber()", return -1);
@@ -2573,6 +2612,11 @@
 }
 
 
+bool Value::IsArrayBufferView() const {
+  return Utils::OpenHandle(this)->IsJSArrayBufferView();
+}
+
+
 bool Value::IsTypedArray() const {
   if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsTypedArray()"))
     return false;
@@ -2606,6 +2650,11 @@
 #undef VALUE_IS_TYPED_ARRAY
 
 
+bool Value::IsDataView() const {
+  return Utils::OpenHandle(this)->IsJSDataView();
+}
+
+
 bool Value::IsObject() const {
   if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsObject()")) return false;
   return Utils::OpenHandle(this)->IsJSObject();
@@ -2872,6 +2921,16 @@
 }
 
 
+#ifdef V8_ENABLE_CHECKS
+void i::Internals::CheckInitialized(v8::Isolate* external_isolate) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
+  ApiCheck(isolate != NULL && isolate->IsInitialized() && !i::V8::IsDead(),
+           "v8::internal::Internals::CheckInitialized()",
+           "Isolate is not initialized or V8 has died");
+}
+#endif
+
+
 void External::CheckCast(v8::Value* that) {
   if (IsDeadCheck(i::Isolate::Current(), "v8::External::Cast()")) return;
   ApiCheck(Utils::OpenHandle(that)->IsExternal(),
@@ -2952,6 +3011,14 @@
 }
 
 
+void v8::ArrayBufferView::CheckCast(Value* that) {
+  i::Handle<i::Object> obj = Utils::OpenHandle(that);
+  ApiCheck(obj->IsJSArrayBufferView(),
+           "v8::ArrayBufferView::Cast()",
+           "Could not convert to ArrayBufferView");
+}
+
+
 void v8::TypedArray::CheckCast(Value* that) {
   if (IsDeadCheck(i::Isolate::Current(), "v8::TypedArray::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
@@ -2978,6 +3045,14 @@
 #undef CHECK_TYPED_ARRAY_CAST
 
 
+void v8::DataView::CheckCast(Value* that) {
+  i::Handle<i::Object> obj = Utils::OpenHandle(that);
+  ApiCheck(obj->IsJSDataView(),
+           "v8::DataView::Cast()",
+           "Could not convert to DataView");
+}
+
+
 void v8::Date::CheckCast(v8::Value* that) {
   i::Isolate* isolate = i::Isolate::Current();
   if (IsDeadCheck(isolate, "v8::Date::Cast()")) return;
@@ -3865,7 +3940,7 @@
   i::Handle<i::String> key_string =
       isolate->factory()->InternalizeString(key_obj);
   i::Handle<i::Object> result(self->GetHiddenProperty(*key_string), isolate);
-  if (result->IsUndefined()) return v8::Local<v8::Value>();
+  if (result->IsTheHole()) return v8::Local<v8::Value>();
   return Utils::ToLocal(result);
 }
 
@@ -4279,6 +4354,7 @@
   return kLineOffsetNotFound;
 }
 
+
 Handle<Value> Function::GetScriptId() const {
   i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
   if (!func->shared()->script()->IsScript())
@@ -4287,6 +4363,15 @@
   return Utils::ToLocal(i::Handle<i::Object>(script->id(), func->GetIsolate()));
 }
 
+
+int Function::ScriptId() const {
+  i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+  if (!func->shared()->script()->IsScript()) return v8::Script::kNoScriptId;
+  i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+  return script->id()->value();
+}
+
+
 int String::Length() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
   if (IsDeadCheck(str->GetIsolate(), "v8::String::Length()")) return 0;
@@ -5131,7 +5216,7 @@
   if (isolate != NULL && isolate->IsInitialized()) {
     return true;
   }
-  return InitializeHelper();
+  return InitializeHelper(isolate);
 }
 
 
@@ -5147,7 +5232,30 @@
 
 
 bool v8::V8::SetFunctionEntryHook(FunctionEntryHook entry_hook) {
-  return i::ProfileEntryHookStub::SetFunctionEntryHook(entry_hook);
+  return SetFunctionEntryHook(Isolate::GetCurrent(), entry_hook);
+}
+
+
+bool v8::V8::SetFunctionEntryHook(Isolate* ext_isolate,
+                                  FunctionEntryHook entry_hook) {
+  ASSERT(ext_isolate != NULL);
+  ASSERT(entry_hook != NULL);
+
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(ext_isolate);
+
+  // The entry hook can only be set before the Isolate is initialized, as
+  // otherwise the Isolate's code stubs generated at initialization won't
+  // contain entry hooks.
+  if (isolate->IsInitialized())
+    return false;
+
+  // Setting an entry hook is a one-way operation: once set, it cannot be
+  // changed or unset.
+  if (isolate->function_entry_hook() != NULL)
+    return false;
+
+  isolate->set_function_entry_hook(entry_hook);
+  return true;
 }
 
 
@@ -6161,11 +6269,17 @@
   LOG_API(obj->GetIsolate(), "v8::ArrayBuffer::Neuter()");
   ENTER_V8(isolate);
 
-  for (i::Handle<i::Object> array_obj(obj->weak_first_array(), isolate);
-       !array_obj->IsUndefined();) {
-    i::Handle<i::JSTypedArray> typed_array(i::JSTypedArray::cast(*array_obj));
-    typed_array->Neuter();
-    array_obj = i::handle(typed_array->weak_next(), isolate);
+  for (i::Handle<i::Object> view_obj(obj->weak_first_view(), isolate);
+       !view_obj->IsUndefined();) {
+    i::Handle<i::JSArrayBufferView> view(i::JSArrayBufferView::cast(*view_obj));
+    if (view->IsJSTypedArray()) {
+      i::JSTypedArray::cast(*view)->Neuter();
+    } else if (view->IsJSDataView()) {
+      i::JSDataView::cast(*view)->Neuter();
+    } else {
+      UNREACHABLE();
+    }
+    view_obj = i::handle(view->weak_next(), isolate);
   }
   obj->Neuter();
 }
@@ -6203,33 +6317,35 @@
 }
 
 
-Local<ArrayBuffer> v8::TypedArray::Buffer() {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::TypedArray::Buffer()"))
-    return Local<ArrayBuffer>();
-  i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
+Local<ArrayBuffer> v8::ArrayBufferView::Buffer() {
+  i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
   ASSERT(obj->buffer()->IsJSArrayBuffer());
   i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(obj->buffer()));
   return Utils::ToLocal(buffer);
 }
 
 
-size_t v8::TypedArray::ByteOffset() {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::TypedArray::ByteOffset()")) return 0;
-  i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
+size_t v8::ArrayBufferView::ByteOffset() {
+  i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
   return static_cast<size_t>(obj->byte_offset()->Number());
 }
 
 
-size_t v8::TypedArray::ByteLength() {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::TypedArray::ByteLength()")) return 0;
-  i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
+size_t v8::ArrayBufferView::ByteLength() {
+  i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
   return static_cast<size_t>(obj->byte_length()->Number());
 }
 
 
+void* v8::ArrayBufferView::BaseAddress() {
+  i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
+  i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(obj->buffer()));
+  void* buffer_data = buffer->backing_store();
+  size_t byte_offset = static_cast<size_t>(obj->byte_offset()->Number());
+  return static_cast<uint8_t*>(buffer_data) + byte_offset;
+}
+
+
 size_t v8::TypedArray::Length() {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   if (IsDeadCheck(isolate, "v8::TypedArray::Length()")) return 0;
@@ -6238,16 +6354,28 @@
 }
 
 
-void* v8::TypedArray::BaseAddress() {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::TypedArray::BaseAddress()")) return NULL;
-  i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
-  i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(obj->buffer()));
-  void* buffer_data = buffer->backing_store();
-  size_t byte_offset = static_cast<size_t>(obj->byte_offset()->Number());
-  return static_cast<uint8_t*>(buffer_data) + byte_offset;
-}
+static inline void SetupArrayBufferView(
+    i::Isolate* isolate,
+    i::Handle<i::JSArrayBufferView> obj,
+    i::Handle<i::JSArrayBuffer> buffer,
+    size_t byte_offset,
+    size_t byte_length) {
+  ASSERT(byte_offset + byte_length <=
+      static_cast<size_t>(buffer->byte_length()->Number()));
 
+  obj->set_buffer(*buffer);
+
+  obj->set_weak_next(buffer->weak_first_view());
+  buffer->set_weak_first_view(*obj);
+
+  i::Handle<i::Object> byte_offset_object =
+    isolate->factory()->NewNumberFromSize(byte_offset);
+  obj->set_byte_offset(*byte_offset_object);
+
+  i::Handle<i::Object> byte_length_object =
+    isolate->factory()->NewNumberFromSize(byte_length);
+  obj->set_byte_length(*byte_length_object);
+}
 
 template<typename ElementType,
          ExternalArrayType array_type,
@@ -6260,24 +6388,12 @@
   i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
 
   ASSERT(byte_offset % sizeof(ElementType) == 0);
-  ASSERT(byte_offset + length * sizeof(ElementType) <=
-      static_cast<size_t>(buffer->byte_length()->Number()));
 
-  obj->set_buffer(*buffer);
+  SetupArrayBufferView(
+      isolate, obj, buffer, byte_offset, length * sizeof(ElementType));
 
-  obj->set_weak_next(buffer->weak_first_array());
-  buffer->set_weak_first_array(*obj);
-
-  i::Handle<i::Object> byte_offset_object = isolate->factory()->NewNumber(
-        static_cast<double>(byte_offset));
-  obj->set_byte_offset(*byte_offset_object);
-
-  i::Handle<i::Object> byte_length_object = isolate->factory()->NewNumber(
-        static_cast<double>(length * sizeof(ElementType)));
-  obj->set_byte_length(*byte_length_object);
-
-  i::Handle<i::Object> length_object = isolate->factory()->NewNumber(
-        static_cast<double>(length));
+  i::Handle<i::Object> length_object =
+    isolate->factory()->NewNumberFromSize(length);
   obj->set_length(*length_object);
 
   i::Handle<i::ExternalArray> elements =
@@ -6326,6 +6442,20 @@
 
 #undef TYPED_ARRAY_NEW
 
+Local<DataView> DataView::New(Handle<ArrayBuffer> array_buffer,
+                              size_t byte_offset, size_t byte_length) {
+  i::Isolate* isolate = i::Isolate::Current();
+  EnsureInitializedForIsolate(
+      isolate, "v8::DataView::New(Handle<ArrayBuffer>, size_t, size_t)");
+  LOG_API(isolate, "v8::DataView::New(Handle<ArrayBuffer>, size_t, size_t)");
+  ENTER_V8(isolate);
+  i::Handle<i::JSDataView> obj = isolate->factory()->NewJSDataView();
+  i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
+  SetupArrayBufferView(
+      isolate, obj, buffer, byte_offset, byte_length);
+  return Utils::ToLocal(obj);
+}
+
 
 Local<Symbol> v8::Symbol::New(Isolate* isolate) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -6538,10 +6668,11 @@
 
 v8::Local<v8::Context> Isolate::GetCurrentContext() {
   i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
-  i::Handle<i::Object> current = internal_isolate->native_context();
-  if (current.is_null()) return Local<Context>();
-  i::Handle<i::Context> context = i::Handle<i::Context>::cast(current);
-  return Utils::ToLocal(context);
+  i::Context* context = internal_isolate->context();
+  if (context == NULL) return Local<Context>();
+  i::Context* native_context = context->global_object()->native_context();
+  if (native_context == NULL) return Local<Context>();
+  return Utils::ToLocal(i::Handle<i::Context>(native_context));
 }
 
 
@@ -7304,33 +7435,11 @@
 }
 
 
-int CpuProfiler::GetProfilesCount() {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::CpuProfiler::GetProfilesCount");
-  i::CpuProfiler* profiler = isolate->cpu_profiler();
-  ASSERT(profiler != NULL);
-  return profiler->GetProfilesCount();
-}
-
-
 int CpuProfiler::GetProfileCount() {
   return reinterpret_cast<i::CpuProfiler*>(this)->GetProfilesCount();
 }
 
 
-const CpuProfile* CpuProfiler::GetProfile(int index,
-                                          Handle<Value> security_token) {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::CpuProfiler::GetProfile");
-  i::CpuProfiler* profiler = isolate->cpu_profiler();
-  ASSERT(profiler != NULL);
-  return reinterpret_cast<const CpuProfile*>(
-      profiler->GetProfile(
-          security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
-          index));
-}
-
-
 const CpuProfile* CpuProfiler::GetCpuProfile(int index,
                                              Handle<Value> security_token) {
   return reinterpret_cast<const CpuProfile*>(
@@ -7346,19 +7455,6 @@
 }
 
 
-const CpuProfile* CpuProfiler::FindProfile(unsigned uid,
-                                           Handle<Value> security_token) {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::CpuProfiler::FindProfile");
-  i::CpuProfiler* profiler = isolate->cpu_profiler();
-  ASSERT(profiler != NULL);
-  return reinterpret_cast<const CpuProfile*>(
-      profiler->FindProfile(
-          security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
-          uid));
-}
-
-
 const CpuProfile* CpuProfiler::FindCpuProfile(unsigned uid,
                                               Handle<Value> security_token) {
   return reinterpret_cast<const CpuProfile*>(
@@ -7368,34 +7464,12 @@
 }
 
 
-void CpuProfiler::StartProfiling(Handle<String> title, bool record_samples) {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::CpuProfiler::StartProfiling");
-  i::CpuProfiler* profiler = isolate->cpu_profiler();
-  ASSERT(profiler != NULL);
-  profiler->StartProfiling(*Utils::OpenHandle(*title), record_samples);
-}
-
-
 void CpuProfiler::StartCpuProfiling(Handle<String> title, bool record_samples) {
   reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
       *Utils::OpenHandle(*title), record_samples);
 }
 
 
-const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title,
-                                             Handle<Value> security_token) {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::CpuProfiler::StopProfiling");
-  i::CpuProfiler* profiler = isolate->cpu_profiler();
-  ASSERT(profiler != NULL);
-  return reinterpret_cast<const CpuProfile*>(
-      profiler->StopProfiling(
-          security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
-          *Utils::OpenHandle(*title)));
-}
-
-
 const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title,
                                                 Handle<Value> security_token) {
   return reinterpret_cast<const CpuProfile*>(
@@ -7413,15 +7487,6 @@
 }
 
 
-void CpuProfiler::DeleteAllProfiles() {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::CpuProfiler::DeleteAllProfiles");
-  i::CpuProfiler* profiler = isolate->cpu_profiler();
-  ASSERT(profiler != NULL);
-  profiler->DeleteAllProfiles();
-}
-
-
 void CpuProfiler::DeleteAllCpuProfiles() {
   reinterpret_cast<i::CpuProfiler*>(this)->DeleteAllProfiles();
 }
@@ -7555,13 +7620,6 @@
 }
 
 
-HeapSnapshot::Type HeapSnapshot::GetType() const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapSnapshot::GetType");
-  return kFull;
-}
-
-
 unsigned HeapSnapshot::GetUid() const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetUid");
@@ -7632,72 +7690,29 @@
 }
 
 
-int HeapProfiler::GetSnapshotsCount() {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshotsCount");
-  return isolate->heap_profiler()->GetSnapshotsCount();
-}
-
-
 int HeapProfiler::GetSnapshotCount() {
   return reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshotsCount();
 }
 
 
-const HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshot");
-  return reinterpret_cast<const HeapSnapshot*>(
-      isolate->heap_profiler()->GetSnapshot(index));
-}
-
-
 const HeapSnapshot* HeapProfiler::GetHeapSnapshot(int index) {
   return reinterpret_cast<const HeapSnapshot*>(
       reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshot(index));
 }
 
 
-const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapProfiler::FindSnapshot");
-  return reinterpret_cast<const HeapSnapshot*>(
-      isolate->heap_profiler()->FindSnapshot(uid));
-}
-
-
 const HeapSnapshot* HeapProfiler::FindHeapSnapshot(unsigned uid) {
   return reinterpret_cast<const HeapSnapshot*>(
       reinterpret_cast<i::HeapProfiler*>(this)->FindSnapshot(uid));
 }
 
 
-SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Value> value) {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshotObjectId");
-  i::Handle<i::Object> obj = Utils::OpenHandle(*value);
-  return isolate->heap_profiler()->GetSnapshotObjectId(obj);
-}
-
-
 SnapshotObjectId HeapProfiler::GetObjectId(Handle<Value> value) {
   i::Handle<i::Object> obj = Utils::OpenHandle(*value);
   return reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshotObjectId(obj);
 }
 
 
-const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
-                                               HeapSnapshot::Type type,
-                                               ActivityControl* control,
-                                               ObjectNameResolver* resolver) {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapProfiler::TakeSnapshot");
-  return reinterpret_cast<const HeapSnapshot*>(
-      isolate->heap_profiler()->TakeSnapshot(
-          *Utils::OpenHandle(*title), control, resolver));
-}
-
-
 const HeapSnapshot* HeapProfiler::TakeHeapSnapshot(
     Handle<String> title,
     ActivityControl* control,
@@ -7708,61 +7723,26 @@
 }
 
 
-void HeapProfiler::StartHeapObjectsTracking() {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapProfiler::StartHeapObjectsTracking");
-  isolate->heap_profiler()->StartHeapObjectsTracking();
-}
-
-
 void HeapProfiler::StartTrackingHeapObjects() {
   reinterpret_cast<i::HeapProfiler*>(this)->StartHeapObjectsTracking();
 }
 
 
-void HeapProfiler::StopHeapObjectsTracking() {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapProfiler::StopHeapObjectsTracking");
-  isolate->heap_profiler()->StopHeapObjectsTracking();
-}
-
-
 void HeapProfiler::StopTrackingHeapObjects() {
   reinterpret_cast<i::HeapProfiler*>(this)->StopHeapObjectsTracking();
 }
 
 
-SnapshotObjectId HeapProfiler::PushHeapObjectsStats(OutputStream* stream) {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapProfiler::PushHeapObjectsStats");
-  return isolate->heap_profiler()->PushHeapObjectsStats(stream);
-}
-
-
 SnapshotObjectId HeapProfiler::GetHeapStats(OutputStream* stream) {
   return reinterpret_cast<i::HeapProfiler*>(this)->PushHeapObjectsStats(stream);
 }
 
 
-void HeapProfiler::DeleteAllSnapshots() {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapProfiler::DeleteAllSnapshots");
-  isolate->heap_profiler()->DeleteAllSnapshots();
-}
-
-
 void HeapProfiler::DeleteAllHeapSnapshots() {
   reinterpret_cast<i::HeapProfiler*>(this)->DeleteAllSnapshots();
 }
 
 
-void HeapProfiler::DefineWrapperClass(uint16_t class_id,
-                                      WrapperInfoCallback callback) {
-  i::Isolate::Current()->heap_profiler()->DefineWrapperClass(class_id,
-                                                             callback);
-}
-
-
 void HeapProfiler::SetWrapperClassInfoProvider(uint16_t class_id,
                                                WrapperInfoCallback callback) {
   reinterpret_cast<i::HeapProfiler*>(this)->DefineWrapperClass(class_id,
@@ -7770,17 +7750,6 @@
 }
 
 
-int HeapProfiler::GetPersistentHandleCount() {
-  i::Isolate* isolate = i::Isolate::Current();
-  return isolate->global_handles()->NumberOfGlobalHandles();
-}
-
-
-size_t HeapProfiler::GetMemorySizeUsedByProfiler() {
-  return i::Isolate::Current()->heap_profiler()->GetMemorySizeUsedByProfiler();
-}
-
-
 size_t HeapProfiler::GetProfilerMemorySize() {
   return reinterpret_cast<i::HeapProfiler*>(this)->
       GetMemorySizeUsedByProfiler();
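
Taken together, the api.cc additions above give embedders a common
ArrayBufferView interface plus a DataView type. A rough usage sketch
(assumes an entered Context; the expected results are in the comments):

    v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(64);
    v8::Local<v8::DataView> view = v8::DataView::New(buffer, 8, 16);

    // Accessors shared by all views (typed arrays and DataViews):
    view->Buffer();       // the 64-byte buffer above
    view->ByteOffset();   // 8
    view->ByteLength();   // 16
    view->BaseAddress();  // start of the backing store + 8
    view->IsDataView();   // true
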
diff --git a/src/api.h b/src/api.h
index 50d4b38..0f33bc8 100644
--- a/src/api.h
+++ b/src/api.h
@@ -170,6 +170,7 @@
   V(Object, JSObject)                          \
   V(Array, JSArray)                            \
   V(ArrayBuffer, JSArrayBuffer)                \
+  V(ArrayBufferView, JSArrayBufferView)        \
   V(TypedArray, JSTypedArray)                  \
   V(Uint8Array, JSTypedArray)                  \
   V(Uint8ClampedArray, JSTypedArray)           \
@@ -180,6 +181,7 @@
   V(Int32Array, JSTypedArray)                  \
   V(Float32Array, JSTypedArray)                \
   V(Float64Array, JSTypedArray)                \
+  V(DataView, JSDataView)                      \
   V(String, String)                            \
   V(Symbol, Symbol)                            \
   V(Script, Object)                            \
@@ -217,6 +219,10 @@
       v8::internal::Handle<v8::internal::JSArray> obj);
   static inline Local<ArrayBuffer> ToLocal(
       v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
+  static inline Local<ArrayBufferView> ToLocal(
+      v8::internal::Handle<v8::internal::JSArrayBufferView> obj);
+  static inline Local<DataView> ToLocal(
+      v8::internal::Handle<v8::internal::JSDataView> obj);
 
   static inline Local<TypedArray> ToLocal(
       v8::internal::Handle<v8::internal::JSTypedArray> obj);
@@ -348,6 +354,8 @@
 MAKE_TO_LOCAL(ToLocal, JSObject, Object)
 MAKE_TO_LOCAL(ToLocal, JSArray, Array)
 MAKE_TO_LOCAL(ToLocal, JSArrayBuffer, ArrayBuffer)
+MAKE_TO_LOCAL(ToLocal, JSArrayBufferView, ArrayBufferView)
+MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
 MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
 
 MAKE_TO_LOCAL_TYPED_ARRAY(Uint8Array, kExternalUnsignedByteArray)
diff --git a/src/arguments.cc b/src/arguments.cc
index b15ae7e..11d9279 100644
--- a/src/arguments.cc
+++ b/src/arguments.cc
@@ -28,6 +28,8 @@
 #include "v8.h"
 #include "arguments.h"
 
+#include "vm-state-inl.h"
+
 namespace v8 {
 namespace internal {
 
@@ -90,6 +92,8 @@
   Isolate* isolate = this->isolate();
   void* f_as_void = CallbackTable::FunctionToVoidPtr(f);
   bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void);
+  VMState<EXTERNAL> state(isolate);
+  ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
   if (new_style) {
     FunctionCallback c = reinterpret_cast<FunctionCallback>(f);
     FunctionCallbackInfo<v8::Value> info(end(),
@@ -114,6 +118,8 @@
   Isolate* isolate = this->isolate();                                          \
   void* f_as_void = CallbackTable::FunctionToVoidPtr(f);                       \
   bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void);             \
+  VMState<EXTERNAL> state(isolate);                                            \
+  ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));                 \
   if (new_style) {                                                             \
     NewFunction c = reinterpret_cast<NewFunction>(f);                          \
     PropertyCallbackInfo<ReturnValue> info(end());                             \
@@ -132,6 +138,8 @@
   Isolate* isolate = this->isolate();                                          \
   void* f_as_void = CallbackTable::FunctionToVoidPtr(f);                       \
   bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void);             \
+  VMState<EXTERNAL> state(isolate);                                            \
+  ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));                 \
   if (new_style) {                                                             \
     NewFunction c = reinterpret_cast<NewFunction>(f);                          \
     PropertyCallbackInfo<ReturnValue> info(end());                             \
@@ -151,6 +159,8 @@
   Isolate* isolate = this->isolate();                                          \
   void* f_as_void = CallbackTable::FunctionToVoidPtr(f);                       \
   bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void);             \
+  VMState<EXTERNAL> state(isolate);                                            \
+  ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));                 \
   if (new_style) {                                                             \
     NewFunction c = reinterpret_cast<NewFunction>(f);                          \
     PropertyCallbackInfo<ReturnValue> info(end());                             \
@@ -170,6 +180,8 @@
   Isolate* isolate = this->isolate();                                          \
   void* f_as_void = CallbackTable::FunctionToVoidPtr(f);                       \
   bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void);             \
+  VMState<EXTERNAL> state(isolate);                                            \
+  ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));                 \
   if (new_style) {                                                             \
     NewFunction c = reinterpret_cast<NewFunction>(f);                          \
     PropertyCallbackInfo<ReturnValue> info(end());                             \
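
The arguments.cc change above wraps every embedder callback invocation in
a VMState<EXTERNAL> and an ExternalCallbackScope, so the profiler can
attribute ticks that land in embedder code. Schematically (V8-internal
sketch, not embedder API):

    VMState<EXTERNAL> state(isolate);   // logger now reports EXTERNAL
    ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
    // ... invoke the callback f; both scopes unwind on return ...
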
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 17831c8..bfe9bc8 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -149,6 +149,7 @@
 
 void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  ASSERT(!target->IsConsString());
   Assembler::set_target_pointer_at(pc_, reinterpret_cast<Address>(target));
   if (mode == UPDATE_WRITE_BARRIER &&
       host() != NULL &&
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 4d7bc8e..6b3caf3 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -104,360 +104,6 @@
 }
 
 
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. An elements backing store is allocated with size initial_capacity
-// and filled with the hole values.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
-                                 Register array_function,
-                                 Register result,
-                                 Register scratch1,
-                                 Register scratch2,
-                                 Register scratch3,
-                                 Label* gc_required) {
-  const int initial_capacity = JSArray::kPreallocatedArrayElements;
-  STATIC_ASSERT(initial_capacity >= 0);
-  __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
-
-  // Allocate the JSArray object together with space for a fixed array with the
-  // requested elements.
-  int size = JSArray::kSize;
-  if (initial_capacity > 0) {
-    size += FixedArray::SizeFor(initial_capacity);
-  }
-  __ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT);
-
-  // Allocated the JSArray. Now initialize the fields except for the elements
-  // array.
-  // result: JSObject
-  // scratch1: initial map
-  // scratch2: start of next object
-  __ str(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
-  __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
-  __ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
-  // Field JSArray::kElementsOffset is initialized later.
-  __ mov(scratch3,  Operand::Zero());
-  __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
-
-  if (initial_capacity == 0) {
-    __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
-    return;
-  }
-
-  // Calculate the location of the elements array and set elements array member
-  // of the JSArray.
-  // result: JSObject
-  // scratch2: start of next object
-  __ add(scratch1, result, Operand(JSArray::kSize));
-  __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
-
-  // Clear the heap tag on the elements array.
-  __ sub(scratch1, scratch1, Operand(kHeapObjectTag));
-
-  // Initialize the FixedArray and fill it with holes. FixedArray length is
-  // stored as a smi.
-  // result: JSObject
-  // scratch1: elements array (untagged)
-  // scratch2: start of next object
-  __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
-  STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
-  __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
-  __ mov(scratch3,  Operand(Smi::FromInt(initial_capacity)));
-  STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
-  __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
-
-  // Fill the FixedArray with the hole value. Inline the code if short.
-  STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
-  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
-  static const int kLoopUnfoldLimit = 4;
-  if (initial_capacity <= kLoopUnfoldLimit) {
-    for (int i = 0; i < initial_capacity; i++) {
-      __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
-    }
-  } else {
-    Label loop, entry;
-    __ add(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
-    __ b(&entry);
-    __ bind(&loop);
-    __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
-    __ bind(&entry);
-    __ cmp(scratch1, scratch2);
-    __ b(lt, &loop);
-  }
-}
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array_storage and elements_array_end
-// (see  below for when that is not the case). If the parameter fill_with_holes
-// is true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array_storage is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
-                            Register array_function,  // Array function.
-                            Register array_size,  // As a smi, cannot be 0.
-                            Register result,
-                            Register elements_array_storage,
-                            Register elements_array_end,
-                            Register scratch1,
-                            Register scratch2,
-                            bool fill_with_hole,
-                            Label* gc_required) {
-  // Load the initial map from the array function.
-  __ LoadInitialArrayMap(array_function, scratch2,
-                         elements_array_storage, fill_with_hole);
-
-  if (FLAG_debug_code) {  // Assert that array size is not zero.
-    __ tst(array_size, array_size);
-    __ Assert(ne, "array size is unexpectedly 0");
-  }
-
-  // Allocate the JSArray object together with space for a FixedArray with the
-  // requested number of elements.
-  __ mov(elements_array_end,
-         Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
-  __ add(elements_array_end, elements_array_end, Operand::SmiUntag(array_size));
-  __ Allocate(elements_array_end,
-              result,
-              scratch1,
-              scratch2,
-              gc_required,
-              static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
-  // Allocated the JSArray. Now initialize the fields except for the elements
-  // array.
-  // result: JSObject
-  // elements_array_storage: initial map
-  // array_size: size of array (smi)
-  __ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
-  __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
-  __ str(elements_array_storage,
-         FieldMemOperand(result, JSArray::kPropertiesOffset));
-  // Field JSArray::kElementsOffset is initialized later.
-  __ str(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
-
-  // Calculate the location of the elements array and set elements array member
-  // of the JSArray.
-  // result: JSObject
-  // array_size: size of array (smi)
-  __ add(elements_array_storage, result, Operand(JSArray::kSize));
-  __ str(elements_array_storage,
-         FieldMemOperand(result, JSArray::kElementsOffset));
-
-  // Clear the heap tag on the elements array.
-  __ sub(elements_array_storage,
-         elements_array_storage,
-         Operand(kHeapObjectTag));
-  // Initialize the fixed array and fill it with holes. FixedArray length is
-  // stored as a smi.
-  // result: JSObject
-  // elements_array_storage: elements array (untagged)
-  // array_size: size of array (smi)
-  __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
-  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
-  __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
-  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
-  __ str(array_size,
-         MemOperand(elements_array_storage, kPointerSize, PostIndex));
-
-  // Calculate elements array and elements array end.
-  // result: JSObject
-  // elements_array_storage: elements array element storage
-  // array_size: smi-tagged size of elements array
-  __ add(elements_array_end,
-         elements_array_storage,
-         Operand::PointerOffsetFromSmiKey(array_size));
-
-  // Fill the allocated FixedArray with the hole value if requested.
-  // result: JSObject
-  // elements_array_storage: elements array element storage
-  // elements_array_end: start of next object
-  if (fill_with_hole) {
-    Label loop, entry;
-    __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ str(scratch1,
-           MemOperand(elements_array_storage, kPointerSize, PostIndex));
-    __ bind(&entry);
-    __ cmp(elements_array_storage, elements_array_end);
-    __ b(lt, &loop);
-  }
-}
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-//   r0: argc
-//   r1: constructor (built-in Array function)
-//   lr: return address
-//   sp[0]: last argument
-// This function is used for both construct and normal calls of Array. The only
-// difference between handling a construct call and a normal call is that for a
-// construct call the constructor function in r1 needs to be preserved for
-// entering the generic code. In both cases argc in r0 needs to be preserved.
-// Both registers are preserved by this code so no need to differentiate between
-// construct call and normal call.
-void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
-  Counters* counters = masm->isolate()->counters();
-  Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
-      has_non_smi_element, finish, cant_transition_map, not_double;
-
-  // Check for array construction with zero arguments or one.
-  __ cmp(r0, Operand::Zero());
-  __ b(ne, &argc_one_or_more);
-
-  // Handle construction of an empty array.
-  __ bind(&empty_array);
-  AllocateEmptyJSArray(masm,
-                       r1,
-                       r2,
-                       r3,
-                       r4,
-                       r5,
-                       call_generic_code);
-  __ IncrementCounter(counters->array_function_native(), 1, r3, r4);
-  // Set up return value, remove receiver from stack and return.
-  __ mov(r0, r2);
-  __ add(sp, sp, Operand(kPointerSize));
-  __ Jump(lr);
-
-  // Check for one argument. Bail out if argument is not smi or if it is
-  // negative.
-  __ bind(&argc_one_or_more);
-  __ cmp(r0, Operand(1));
-  __ b(ne, &argc_two_or_more);
-  __ ldr(r2, MemOperand(sp));  // Get the argument from the stack.
-  __ tst(r2, r2);
-  __ b(ne, &not_empty_array);
-  __ Drop(1);  // Adjust stack.
-  __ mov(r0, Operand::Zero());  // Treat this as a call with argc of zero.
-  __ b(&empty_array);
-
-  __ bind(&not_empty_array);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
-  __ b(ne, call_generic_code);
-
-  // Handle construction of an empty array of a certain size. Bail out if size
-  // is too large to actually allocate an elements array.
-  STATIC_ASSERT(kSmiTag == 0);
-  __ cmp(r2, Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
-  __ b(ge, call_generic_code);
-
-  // r0: argc
-  // r1: constructor
-  // r2: array_size (smi)
-  // sp[0]: argument
-  AllocateJSArray(masm,
-                  r1,
-                  r2,
-                  r3,
-                  r4,
-                  r5,
-                  r6,
-                  r7,
-                  true,
-                  call_generic_code);
-  __ IncrementCounter(counters->array_function_native(), 1, r2, r4);
-  // Set up return value, remove receiver and argument from stack and return.
-  __ mov(r0, r3);
-  __ add(sp, sp, Operand(2 * kPointerSize));
-  __ Jump(lr);
-
-  // Handle construction of an array from a list of arguments.
-  __ bind(&argc_two_or_more);
-  __ SmiTag(r2, r0);
-
-  // r0: argc
-  // r1: constructor
-  // r2: array_size (smi)
-  // sp[0]: last argument
-  AllocateJSArray(masm,
-                  r1,
-                  r2,
-                  r3,
-                  r4,
-                  r5,
-                  r6,
-                  r7,
-                  false,
-                  call_generic_code);
-  __ IncrementCounter(counters->array_function_native(), 1, r2, r6);
-
-  // Fill arguments as array elements. Copy from the top of the stack (last
-  // element) to the array backing store filling it backwards. Note:
-  // elements_array_end points after the backing store therefore PreIndex is
-  // used when filling the backing store.
-  // r0: argc
-  // r3: JSArray
-  // r4: elements_array storage start (untagged)
-  // r5: elements_array_end (untagged)
-  // sp[0]: last argument
-  Label loop, entry;
-  __ mov(r7, sp);
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
-  if (FLAG_smi_only_arrays) {
-    __ JumpIfNotSmi(r2, &has_non_smi_element);
-  }
-  __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
-  __ bind(&entry);
-  __ cmp(r4, r5);
-  __ b(lt, &loop);
-
-  __ bind(&finish);
-  __ mov(sp, r7);
-
-  // Remove caller arguments and receiver from the stack, setup return value and
-  // return.
-  // r0: argc
-  // r3: JSArray
-  // sp[0]: receiver
-  __ add(sp, sp, Operand(kPointerSize));
-  __ mov(r0, r3);
-  __ Jump(lr);
-
-  __ bind(&has_non_smi_element);
-  // Double values are handled by the runtime.
-  __ CheckMap(
-      r2, r9, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
-  __ bind(&cant_transition_map);
-  __ UndoAllocationInNewSpace(r3, r4);
-  __ b(call_generic_code);
-
-  __ bind(&not_double);
-  // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
-  // r3: JSArray
-  __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         r2,
-                                         r9,
-                                         &cant_transition_map);
-  __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
-  __ RecordWriteField(r3,
-                      HeapObject::kMapOffset,
-                      r2,
-                      r9,
-                      kLRHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  Label loop2;
-  __ sub(r7, r7, Operand(kPointerSize));
-  __ bind(&loop2);
-  __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
-  __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
-  __ cmp(r4, r5);
-  __ b(lt, &loop2);
-  __ b(&finish);
-}
-
-
 void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r0     : number of arguments
@@ -480,20 +126,9 @@
 
   // Run the native code for the InternalArray function called as a normal
   // function.
-  if (FLAG_optimize_constructed_arrays) {
-    // tail call a stub
-    InternalArrayConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else {
-    ArrayNativeCode(masm, &generic_array_code);
-
-    // Jump to the generic array code if the specialized code cannot handle the
-    // construction.
-    __ bind(&generic_array_code);
-    Handle<Code> array_code =
-        masm->isolate()->builtins()->InternalArrayCodeGeneric();
-    __ Jump(array_code, RelocInfo::CODE_TARGET);
-  }
+  // Tail call a stub.
+  InternalArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
 }
 
 
@@ -518,56 +153,13 @@
   }
 
   // Run the native code for the Array function called as a normal function.
-  if (FLAG_optimize_constructed_arrays) {
-    // tail call a stub
-    Handle<Object> undefined_sentinel(
-        masm->isolate()->heap()->undefined_value(),
-        masm->isolate());
-    __ mov(r2, Operand(undefined_sentinel));
-    ArrayConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else {
-    ArrayNativeCode(masm, &generic_array_code);
-
-    // Jump to the generic array code if the specialized code cannot handle
-    // the construction.
-    __ bind(&generic_array_code);
-    Handle<Code> array_code =
-        masm->isolate()->builtins()->ArrayCodeGeneric();
-    __ Jump(array_code, RelocInfo::CODE_TARGET);
-  }
-}
-
-
-void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r0     : number of arguments
-  //  -- r1     : constructor function
-  //  -- r2     : type info cell
-  //  -- lr     : return address
-  //  -- sp[...]: constructor arguments
-  // -----------------------------------
-
-  if (FLAG_debug_code) {
-    // The array construct code is only set for the builtin and internal
-    // Array functions which always have a map.
-    // Initial map for the builtin Array function should be a map.
-    __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
-    __ SmiTst(r3);
-    __ Assert(ne, "Unexpected initial map for Array function");
-    __ CompareObjectType(r3, r3, r4, MAP_TYPE);
-    __ Assert(eq, "Unexpected initial map for Array function");
-  }
-  Label generic_constructor;
-  // Run the native code for the Array function called as a constructor.
-  ArrayNativeCode(masm, &generic_constructor);
-
-  // Jump to the generic construct code in case the specialized code cannot
-  // handle the construction.
-  __ bind(&generic_constructor);
-  Handle<Code> generic_construct_stub =
-      masm->isolate()->builtins()->JSConstructStubGeneric();
-  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+  // Tail call a stub.
+  Handle<Object> undefined_sentinel(
+      masm->isolate()->heap()->undefined_value(),
+      masm->isolate());
+  __ mov(r2, Operand(undefined_sentinel));
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
 }
 
 
@@ -1125,6 +717,7 @@
   // r3: argc
   // r4: argv
   // r5-r7, cp may be clobbered
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
   // Clear the context before we push it when entering the internal frame.
   __ mov(cp, Operand::Zero());
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
old mode 100644
new mode 100755
index ca44ffe..8e9237c
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -892,12 +892,17 @@
 
     // Now that we have the types we might as well check for
     // internalized-internalized.
-    // Ensure that no non-strings have the internalized bit set.
-    STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsInternalizedMask);
+    Label not_internalized;
     STATIC_ASSERT(kInternalizedTag != 0);
-    __ and_(r2, r2, Operand(r3));
-    __ tst(r2, Operand(kIsInternalizedMask));
-    __ b(ne, &return_not_equal);
+    __ and_(r2, r2, Operand(kIsNotStringMask | kIsInternalizedMask));
+    __ cmp(r2, Operand(kInternalizedTag | kStringTag));
+    __ b(ne, &not_internalized);  // r2 (rhs) is not an internalized string
+
+    __ and_(r3, r3, Operand(kIsNotStringMask | kIsInternalizedMask));
+    __ cmp(r3, Operand(kInternalizedTag | kStringTag));
+    __ b(eq, &return_not_equal);  // both rhs and lhs are internalized strings
+
+    __ bind(&not_internalized);
 }
 
 
@@ -937,7 +942,6 @@
          (lhs.is(r1) && rhs.is(r0)));
 
   // r2 is object type of rhs.
-  // Ensure that no non-strings have the internalized bit set.
   Label object_test;
   STATIC_ASSERT(kInternalizedTag != 0);
   __ tst(r2, Operand(kIsNotStringMask));
@@ -2075,7 +2079,7 @@
 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
   Label right_arg_changed, call_runtime;
 
-  if (op_ == Token::MOD && has_fixed_right_arg_) {
+  if (op_ == Token::MOD && encoded_right_arg_.has_value) {
     // It is guaranteed that the value will fit into a Smi, because if it
     // didn't, we wouldn't be here, see BinaryOp_Patch.
     __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value())));
@@ -2252,7 +2256,7 @@
         // to type transition.
 
       } else {
-        if (has_fixed_right_arg_) {
+        if (encoded_right_arg_.has_value) {
           __ Vmov(d8, fixed_right_arg_value(), scratch1);
           __ VFPCompareAndSetFlags(d1, d8);
           __ b(ne, &transition);
@@ -2996,9 +3000,7 @@
   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
   RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
-  if (FLAG_optimize_constructed_arrays) {
-    ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
-  }
+  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
 }
 
 
@@ -3179,6 +3181,8 @@
   // sp: stack pointer  (restored as callee's sp after C call)
   // cp: current context  (C callee-saved)
 
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
   // Result returned in r0 or r0+r1 by default.
 
   // NOTE: Invocations of builtins may return failure objects
@@ -3269,6 +3273,8 @@
 
   Label invoke, handler_entry, exit;
 
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
   // Called from C, so do not pop argc and args on exit (preserve sp)
   // No need to save register-passed args
   // Save callee-saved registers (incl. cp and fp), sp, and lr
@@ -4623,52 +4629,12 @@
 }
 
 
-static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
-  // Cache the called function in a global property cell.  Cache states
-  // are uninitialized, monomorphic (indicated by a JSFunction), and
-  // megamorphic.
-  // r1 : the function to call
-  // r2 : cache cell for call target
-  Label done;
-
-  ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
-            masm->isolate()->heap()->undefined_value());
-  ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
-            masm->isolate()->heap()->the_hole_value());
-
-  // Load the cache state into r3.
-  __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
-
-  // A monomorphic cache hit or an already megamorphic state: invoke the
-  // function without changing the state.
-  __ cmp(r3, r1);
-  __ b(eq, &done);
-  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
-  __ b(eq, &done);
-
-  // A monomorphic miss (i.e, here the cache is not uninitialized) goes
-  // megamorphic.
-  __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
-  // MegamorphicSentinel is an immortal immovable object (undefined) so no
-  // write-barrier is needed.
-  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex, ne);
-  __ str(ip, FieldMemOperand(r2, Cell::kValueOffset), ne);
-
-  // An uninitialized cache is patched with the function.
-  __ str(r1, FieldMemOperand(r2, Cell::kValueOffset), eq);
-  // No need for a write barrier here - cells are rescanned.
-
-  __ bind(&done);
-}
-
-
 static void GenerateRecordCallTarget(MacroAssembler* masm) {
   // Cache the called function in a global property cell.  Cache states
   // are uninitialized, monomorphic (indicated by a JSFunction), and
   // megamorphic.
   // r1 : the function to call
   // r2 : cache cell for call target
-  ASSERT(FLAG_optimize_constructed_arrays);
   Label initialize, done, miss, megamorphic, not_array_function;
 
   ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
@@ -4689,12 +4655,15 @@
   // Special handling of the Array() function, which caches not only the
   // monomorphic Array function but the initial ElementsKind with special
   // sentinels
-  Handle<Object> terminal_kind_sentinel =
-      TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
-                                                  LAST_FAST_ELEMENTS_KIND);
   __ JumpIfNotSmi(r3, &miss);
-  __ cmp(r3, Operand(terminal_kind_sentinel));
-  __ b(gt, &miss);
+  if (FLAG_debug_code) {
+    Handle<Object> terminal_kind_sentinel =
+        TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
+                                                    LAST_FAST_ELEMENTS_KIND);
+    __ cmp(r3, Operand(terminal_kind_sentinel));
+    __ Assert(le, "Array function sentinel is not an ElementsKind");
+  }
+
   // Make sure the function is the Array() function
   __ LoadArrayFunction(r3);
   __ cmp(r1, r3);
@@ -4772,11 +4741,7 @@
   __ b(ne, &slow);
 
   if (RecordCallTarget()) {
-    if (FLAG_optimize_constructed_arrays) {
-      GenerateRecordCallTarget(masm);
-    } else {
-      GenerateRecordCallTargetNoArray(masm);
-    }
+    GenerateRecordCallTarget(masm);
   }
 
   // Fast-case: Invoke the function now.
@@ -4851,15 +4816,11 @@
   __ b(ne, &slow);
 
   if (RecordCallTarget()) {
-    if (FLAG_optimize_constructed_arrays) {
-      GenerateRecordCallTarget(masm);
-    } else {
-      GenerateRecordCallTargetNoArray(masm);
-    }
+    GenerateRecordCallTarget(masm);
   }
 
   // Jump to the function-specific construct stub.
-  Register jmp_reg = FLAG_optimize_constructed_arrays ? r3 : r2;
+  Register jmp_reg = r3;
   __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
   __ ldr(jmp_reg, FieldMemOperand(jmp_reg,
                                   SharedFunctionInfo::kConstructStubOffset));
@@ -6263,9 +6224,14 @@
   __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
   STATIC_ASSERT(kInternalizedTag != 0);
-  __ and_(tmp1, tmp1, Operand(tmp2));
-  __ tst(tmp1, Operand(kIsInternalizedMask));
-  __ b(eq, &miss);
+
+  __ and_(tmp1, tmp1, Operand(kIsNotStringMask | kIsInternalizedMask));
+  __ cmp(tmp1, Operand(kInternalizedTag | kStringTag));
+  __ b(ne, &miss);
+
+  __ and_(tmp2, tmp2, Operand(kIsNotStringMask | kIsInternalizedMask));
+  __ cmp(tmp2, Operand(kInternalizedTag | kStringTag));
+  __ b(ne, &miss);
 
   // Internalized strings are compared by identity.
   __ cmp(left, right);
@@ -6304,19 +6270,8 @@
   __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
 
-  Label succeed1;
-  __ tst(tmp1, Operand(kIsInternalizedMask));
-  __ b(ne, &succeed1);
-  __ cmp(tmp1, Operand(SYMBOL_TYPE));
-  __ b(ne, &miss);
-  __ bind(&succeed1);
-
-  Label succeed2;
-  __ tst(tmp2, Operand(kIsInternalizedMask));
-  __ b(ne, &succeed2);
-  __ cmp(tmp2, Operand(SYMBOL_TYPE));
-  __ b(ne, &miss);
-  __ bind(&succeed2);
+  __ JumpIfNotUniqueName(tmp1, &miss);
+  __ JumpIfNotUniqueName(tmp2, &miss);
 
   // Unique names are compared by identity.
   __ cmp(left, right);
@@ -6371,7 +6326,8 @@
   // Handle not identical strings.
 
   // Check that both strings are internalized strings. If they are, we're done
-  // because we already know they are not identical.
+  // because we already know they are not identical. We know they are both
+  // strings.
   if (equality) {
     ASSERT(GetCondition() == eq);
     STATIC_ASSERT(kInternalizedTag != 0);
@@ -6557,11 +6513,7 @@
     __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
     __ ldrb(entity_name,
             FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
-    __ tst(entity_name, Operand(kIsInternalizedMask));
-    __ b(ne, &good);
-    __ cmp(entity_name, Operand(SYMBOL_TYPE));
-    __ b(ne, miss);
-
+    __ JumpIfNotUniqueName(entity_name, miss);
     __ bind(&good);
 
     // Restore the properties.
@@ -6728,15 +6680,10 @@
 
     if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
       // Check if the entry name is not a unique name.
-      Label cont;
       __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
       __ ldrb(entry_key,
               FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
-      __ tst(entry_key, Operand(kIsInternalizedMask));
-      __ b(ne, &cont);
-      __ cmp(entry_key, Operand(SYMBOL_TYPE));
-      __ b(ne, &maybe_in_dictionary);
-      __ bind(&cont);
+      __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
     }
   }
 
@@ -7131,8 +7078,9 @@
 
 
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
-  if (entry_hook_ != NULL) {
+  if (masm->isolate()->function_entry_hook() != NULL) {
     PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize);
+    AllowStubCallsScope allow_stub_calls(masm, true);
     ProfileEntryHookStub stub;
     __ push(lr);
     __ CallStub(&stub);
@@ -7146,9 +7094,21 @@
   const int32_t kReturnAddressDistanceFromFunctionStart =
       3 * Assembler::kInstrSize;
 
-  // Save live volatile registers.
-  __ Push(lr, r5, r1);
-  const int32_t kNumSavedRegs = 3;
+  // This should contain all kCallerSaved registers.
+  const RegList kSavedRegs =
+      1 <<  0 |  // r0
+      1 <<  1 |  // r1
+      1 <<  2 |  // r2
+      1 <<  3 |  // r3
+      1 <<  5 |  // r5
+      1 <<  9;   // r9
+  // We also save lr, so the count here is one higher than the mask indicates.
+  const int32_t kNumSavedRegs = 7;
+
+  ASSERT((kCallerSaved & kSavedRegs) == kCallerSaved);
+
+  // Save all caller-save registers as this may be called from anywhere.
+  __ stm(db_w, sp, kSavedRegs | lr.bit());
 
   // Compute the function's address for the first argument.
   __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
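
A RegList is a plain 32-bit bitmask with one bit per core register, so kSavedRegs above is just the or of six register bits, and kNumSavedRegs is 7 because the stm/ldm pair additionally transfers lr (and, on exit, pc). A small sketch checking that arithmetic; the typedef mirrors V8's, the rest is illustrative:

    #include <cassert>
    #include <cstdint>

    typedef uint32_t RegList;  // one bit per ARM core register

    int main() {
      const RegList kSavedRegs =
          (1u << 0) | (1u << 1) | (1u << 2) |  // r0..r2
          (1u << 3) | (1u << 5) | (1u << 9);   // r3, r5, r9
      int count = 0;
      for (RegList m = kSavedRegs; m != 0; m >>= 1) count += m & 1;
      assert(count == 6);      // registers named in the mask
      assert(count + 1 == 7);  // kNumSavedRegs: lr rides along in the stm
      return 0;
    }
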
@@ -7166,14 +7126,13 @@
   }
 
 #if defined(V8_HOST_ARCH_ARM)
-  __ mov(ip, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
-  __ ldr(ip, MemOperand(ip));
+  int32_t entry_hook =
+      reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook());
+  __ mov(ip, Operand(entry_hook));
 #else
   // Under the simulator we need to indirect the entry hook through a
   // trampoline function at a known address.
-  Address trampoline_address = reinterpret_cast<Address>(
-      reinterpret_cast<intptr_t>(EntryHookTrampoline));
-  ApiFunction dispatcher(trampoline_address);
+  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
   __ mov(ip, Operand(ExternalReference(&dispatcher,
                                        ExternalReference::BUILTIN_CALL,
                                        masm->isolate())));
@@ -7185,8 +7144,8 @@
     __ mov(sp, r5);
   }
 
-  __ Pop(lr, r5, r1);
-  __ Ret();
+  // Also pop pc so the register restore doubles as the return (Ret(0)).
+  __ ldm(ia_w, sp, kSavedRegs | pc.bit());
 }
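
With the hook now read off the isolate instead of a process-wide static, the stub must cope with being reached from any generated code, which is why it saves the full caller-saved set above. The hook itself receives the entered function's address and the location of the return address; the typedef below matches FunctionEntryHook from v8.h, while the logging body is purely illustrative:

    #include <cstdint>
    #include <cstdio>

    // Signature as declared in v8.h; the body is a sketch.
    typedef void (*FunctionEntryHook)(uintptr_t function,
                                      uintptr_t return_addr_location);

    static void LogFunctionEntry(uintptr_t function,
                                 uintptr_t return_addr_location) {
      // Runs on every entry into generated code while installed.
      std::printf("enter %#lx (return-address slot %#lx)\n",
                  static_cast<unsigned long>(function),
                  static_cast<unsigned long>(return_addr_location));
    }

    int main() {
      FunctionEntryHook hook = &LogFunctionEntry;
      hook(0x1000, 0x2000);  // stand-in addresses
      return 0;
    }
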
 
 
@@ -7242,6 +7201,10 @@
   __ cmp(r2, Operand(undefined_sentinel));
   __ b(eq, &normal_sequence);
 
+  // The type cell may have gone megamorphic; don't overwrite it if so.
+  __ ldr(r5, FieldMemOperand(r2, kPointerSize));
+  __ JumpIfNotSmi(r5, &normal_sequence);
+
   // Save the resulting elements kind in type info
   __ SmiTag(r3);
   __ str(r3, FieldMemOperand(r2, kPointerSize));
@@ -7274,7 +7237,7 @@
     T stub(kind);
     stub.GetCode(isolate)->set_is_pregenerated(true);
     if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
-      T stub1(kind, true);
+      T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
       stub1.GetCode(isolate)->set_is_pregenerated(true);
     }
   }
@@ -7341,52 +7304,39 @@
     __ bind(&okay_here);
   }
 
-  if (FLAG_optimize_constructed_arrays) {
-    Label no_info, switch_ready;
-    // Get the elements kind and case on that.
-    __ cmp(r2, Operand(undefined_sentinel));
-    __ b(eq, &no_info);
-    __ ldr(r3, FieldMemOperand(r2, PropertyCell::kValueOffset));
-    __ JumpIfNotSmi(r3, &no_info);
-    __ SmiUntag(r3);
-    __ jmp(&switch_ready);
-    __ bind(&no_info);
-    __ mov(r3, Operand(GetInitialFastElementsKind()));
-    __ bind(&switch_ready);
+  Label no_info, switch_ready;
+  // Get the elements kind and switch on it.
+  __ cmp(r2, Operand(undefined_sentinel));
+  __ b(eq, &no_info);
+  __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
+  __ JumpIfNotSmi(r3, &no_info);
+  __ SmiUntag(r3);
+  __ jmp(&switch_ready);
+  __ bind(&no_info);
+  __ mov(r3, Operand(GetInitialFastElementsKind()));
+  __ bind(&switch_ready);
 
-    if (argument_count_ == ANY) {
-      Label not_zero_case, not_one_case;
-      __ tst(r0, r0);
-      __ b(ne, &not_zero_case);
-      CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+  if (argument_count_ == ANY) {
+    Label not_zero_case, not_one_case;
+    __ tst(r0, r0);
+    __ b(ne, &not_zero_case);
+    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
 
-      __ bind(&not_zero_case);
-      __ cmp(r0, Operand(1));
-      __ b(gt, &not_one_case);
-      CreateArrayDispatchOneArgument(masm);
+    __ bind(&not_zero_case);
+    __ cmp(r0, Operand(1));
+    __ b(gt, &not_one_case);
+    CreateArrayDispatchOneArgument(masm);
 
-      __ bind(&not_one_case);
-      CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
-    } else if (argument_count_ == NONE) {
-      CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
-    } else if (argument_count_ == ONE) {
-      CreateArrayDispatchOneArgument(masm);
-    } else if (argument_count_ == MORE_THAN_ONE) {
-      CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
-    } else {
-      UNREACHABLE();
-    }
+    __ bind(&not_one_case);
+    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
+  } else if (argument_count_ == NONE) {
+    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+  } else if (argument_count_ == ONE) {
+    CreateArrayDispatchOneArgument(masm);
+  } else if (argument_count_ == MORE_THAN_ONE) {
+    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
   } else {
-     Label generic_constructor;
-     // Run the native code for the Array function called as a constructor.
-     ArrayNativeCode(masm, &generic_constructor);
-
-     // Jump to the generic construct code in case the specialized code cannot
-     // handle the construction.
-     __ bind(&generic_constructor);
-     Handle<Code> generic_construct_stub =
-         masm->isolate()->builtins()->JSConstructStubGeneric();
-     __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+    UNREACHABLE();
   }
 }
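
With the flag removed, the generic-constructor fallback goes away and dispatch depends only on argument_count_, the ANY case testing the live argument count in r0. The shape of that dispatch as a hedged C++ sketch; the stub functions stand in for generated code:

    #include <cstdio>

    enum ArgumentCountKey { ANY, NONE, ONE, MORE_THAN_ONE };

    // Stand-ins for the generated constructor stubs.
    void NoArgumentStub()  { std::puts("0-argument stub"); }
    void OneArgumentStub() { std::puts("1-argument stub"); }
    void NArgumentsStub()  { std::puts("N-argument stub"); }

    void Dispatch(ArgumentCountKey key, int argc) {
      if (key == ANY) {            // decide at run time, like the r0 tests
        if (argc == 0)      NoArgumentStub();
        else if (argc == 1) OneArgumentStub();
        else                NArgumentsStub();
      } else if (key == NONE) {
        NoArgumentStub();
      } else if (key == ONE) {
        OneArgumentStub();
      } else {                     // MORE_THAN_ONE
        NArgumentsStub();
      }
    }

    int main() {
      Dispatch(ANY, 2);   // prints "N-argument stub"
      Dispatch(ONE, 1);   // prints "1-argument stub"
      return 0;
    }
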
 
@@ -7448,45 +7398,31 @@
     __ Assert(eq, "Unexpected initial map for Array function");
   }
 
-  if (FLAG_optimize_constructed_arrays) {
-    // Figure out the right elements kind
-    __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+  // Figure out the right elements kind
+  __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+  // Load the map's "bit field 2" into r3. We only need the first byte,
+  // but the following bit-field extraction takes care of that anyway.
+  __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
+  // Retrieve elements_kind from bit field 2.
+  __ Ubfx(r3, r3, Map::kElementsKindShift, Map::kElementsKindBitCount);
 
-    // Load the map's "bit field 2" into |result|. We only need the first byte,
-    // but the following bit field extraction takes care of that anyway.
-    __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
-    // Retrieve elements_kind from bit field 2.
-    __ Ubfx(r3, r3, Map::kElementsKindShift, Map::kElementsKindBitCount);
-
-    if (FLAG_debug_code) {
-      Label done;
-      __ cmp(r3, Operand(FAST_ELEMENTS));
-      __ b(eq, &done);
-      __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
-      __ Assert(eq,
-          "Invalid ElementsKind for InternalArray or InternalPackedArray");
-      __ bind(&done);
-    }
-
-    Label fast_elements_case;
+  if (FLAG_debug_code) {
+    Label done;
     __ cmp(r3, Operand(FAST_ELEMENTS));
-    __ b(eq, &fast_elements_case);
-    GenerateCase(masm, FAST_HOLEY_ELEMENTS);
-
-    __ bind(&fast_elements_case);
-    GenerateCase(masm, FAST_ELEMENTS);
-  } else {
-    Label generic_constructor;
-    // Run the native code for the Array function called as constructor.
-    ArrayNativeCode(masm, &generic_constructor);
-
-    // Jump to the generic construct code in case the specialized code cannot
-    // handle the construction.
-    __ bind(&generic_constructor);
-    Handle<Code> generic_construct_stub =
-        masm->isolate()->builtins()->JSConstructStubGeneric();
-    __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+    __ b(eq, &done);
+    __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
+    __ Assert(eq,
+              "Invalid ElementsKind for InternalArray or InternalPackedArray");
+    __ bind(&done);
   }
+
+  Label fast_elements_case;
+  __ cmp(r3, Operand(FAST_ELEMENTS));
+  __ b(eq, &fast_elements_case);
+  GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+  __ bind(&fast_elements_case);
+  GenerateCase(masm, FAST_ELEMENTS);
 }
 
 
diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc
index f5a7dbd..72fc5ce 100644
--- a/src/arm/frames-arm.cc
+++ b/src/arm/frames-arm.cc
@@ -32,7 +32,7 @@
 #include "assembler.h"
 #include "assembler-arm.h"
 #include "assembler-arm-inl.h"
-#include "frames-inl.h"
+#include "frames.h"
 #include "macro-assembler.h"
 #include "macro-assembler-arm.h"
 
@@ -40,11 +40,6 @@
 namespace internal {
 
 
-Address ExitFrame::ComputeStackPointer(Address fp) {
-  return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
-
-
 Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
 Register JavaScriptFrame::context_register() { return cp; }
 
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 77d5524..357f0af 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -1692,10 +1692,10 @@
         ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
         // Fall through.
       case ObjectLiteral::Property::COMPUTED:
-        if (key->handle()->IsInternalizedString()) {
+        if (key->value()->IsInternalizedString()) {
           if (property->emit_store()) {
             VisitForAccumulatorValue(value);
-            __ mov(r2, Operand(key->handle()));
+            __ mov(r2, Operand(key->value()));
             __ ldr(r1, MemOperand(sp));
             Handle<Code> ic = is_classic_mode()
                 ? isolate()->builtins()->StoreIC_Initialize()
@@ -1829,10 +1829,7 @@
     Expression* subexpr = subexprs->at(i);
     // If the subexpression is a literal or a simple materialized literal it
     // is already set in the cloned array.
-    if (subexpr->AsLiteral() != NULL ||
-        CompileTimeValue::IsCompileTimeValue(subexpr)) {
-      continue;
-    }
+    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
 
     if (!result_saved) {
       __ push(r0);
@@ -1994,14 +1991,29 @@
       __ push(result_register());
       // Fall through.
     case Yield::INITIAL: {
-      VisitForStackValue(expr->generator_object());
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-      __ ldr(context_register(),
-             MemOperand(fp, StandardFrameConstants::kContextOffset));
+      Label suspend, continuation, post_runtime, resume;
 
-      Label resume;
-      __ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex);
-      __ b(ne, &resume);
+      __ jmp(&suspend);
+
+      __ bind(&continuation);
+      __ jmp(&resume);
+
+      __ bind(&suspend);
+      VisitForAccumulatorValue(expr->generator_object());
+      ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+      __ mov(r1, Operand(Smi::FromInt(continuation.pos())));
+      __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
+      __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
+      __ mov(r1, cp);
+      __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
+                          kLRHasBeenSaved, kDontSaveFPRegs);
+      __ add(r1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+      __ cmp(sp, r1);
+      __ b(eq, &post_runtime);
+      __ push(r0);  // generator object
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+      __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+      __ bind(&post_runtime);
       __ pop(result_register());
       EmitReturnSequence();
 
@@ -2029,7 +2041,8 @@
       // [sp + 1 * kPointerSize] iter
       // [sp + 0 * kPointerSize] g
 
-      Label l_catch, l_try, l_resume, l_next, l_call, l_loop;
+      Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+      Label l_next, l_call, l_loop;
       // Initial send value is undefined.
       __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
       __ b(&l_next);
@@ -2051,13 +2064,22 @@
       __ PushTryHandler(StackHandler::CATCH, expr->index());
       const int handler_size = StackHandlerConstants::kSize;
       __ push(r0);                                       // result
-      __ ldr(r3, MemOperand(sp, (0 + 1) * kPointerSize + handler_size));  // g
-      __ push(r3);                                       // g
+      __ jmp(&l_suspend);
+      __ bind(&l_continuation);
+      __ jmp(&l_resume);
+      __ bind(&l_suspend);
+      const int generator_object_depth = kPointerSize + handler_size;
+      __ ldr(r0, MemOperand(sp, generator_object_depth));
+      __ push(r0);                                       // g
+      ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+      __ mov(r1, Operand(Smi::FromInt(l_continuation.pos())));
+      __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
+      __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
+      __ mov(r1, cp);
+      __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
+                          kLRHasBeenSaved, kDontSaveFPRegs);
       __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-      __ ldr(context_register(),
-             MemOperand(fp, StandardFrameConstants::kContextOffset));
-      __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
-      __ b(ne, &l_resume);
+      __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
       __ pop(r0);                                        // result
       EmitReturnSequence();
       __ bind(&l_resume);                                // received in r0
@@ -2132,7 +2154,7 @@
   __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
   Label push_argument_holes, push_frame;
   __ bind(&push_argument_holes);
-  __ sub(r3, r3, Operand(1), SetCC);
+  __ sub(r3, r3, Operand(Smi::FromInt(1)), SetCC);
   __ b(mi, &push_frame);
   __ push(r2);
   __ jmp(&push_argument_holes);
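
The decrement fix above matters because r3 holds a Smi-tagged count: subtracting an untagged 1 would corrupt the tag. A sketch of the 32-bit Smi scheme (one tag bit, payload shifted left by one; range checks omitted):

    #include <cassert>
    #include <cstdint>

    int32_t SmiFromInt(int32_t value) { return value << 1; }  // tag
    int32_t SmiToInt(int32_t smi)     { return smi >> 1; }    // untag

    int main() {
      int32_t counter = SmiFromInt(3);
      // Decrement by a tagged 1, as the fixed code does; the result stays
      // a valid Smi. Subtracting an untagged 1 would break the encoding.
      counter -= SmiFromInt(1);
      assert(SmiToInt(counter) == 2);
      return 0;
    }
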
@@ -2236,7 +2258,7 @@
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
-  __ mov(r2, Operand(key->handle()));
+  __ mov(r2, Operand(key->value()));
   // Call load IC. It has arguments receiver and property name r0 and r2.
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
   CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
@@ -2391,7 +2413,7 @@
       VisitForAccumulatorValue(prop->obj());
       __ mov(r1, r0);
       __ pop(r0);  // Restore value.
-      __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+      __ mov(r2, Operand(prop->key()->AsLiteral()->value()));
       Handle<Code> ic = is_classic_mode()
           ? isolate()->builtins()->StoreIC_Initialize()
           : isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2520,7 +2542,7 @@
 
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
-  __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+  __ mov(r2, Operand(prop->key()->AsLiteral()->value()));
   __ pop(r1);
 
   Handle<Code> ic = is_classic_mode()
@@ -2794,7 +2816,7 @@
     }
     if (property->key()->IsPropertyName()) {
       EmitCallWithIC(expr,
-                     property->key()->AsLiteral()->handle(),
+                     property->key()->AsLiteral()->value(),
                      RelocInfo::CODE_TARGET);
     } else {
       EmitKeyedCallWithIC(expr, property->key());
@@ -3402,7 +3424,7 @@
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   ASSERT_NE(NULL, args->at(1)->AsLiteral());
-  Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+  Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
 
@@ -3821,7 +3843,7 @@
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
   ASSERT_NE(NULL, args->at(0)->AsLiteral());
-  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
 
   Handle<FixedArray> jsfunction_result_caches(
       isolate()->native_context()->jsfunction_result_caches());
@@ -4494,7 +4516,7 @@
       }
       break;
     case NAMED_PROPERTY: {
-      __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+      __ mov(r2, Operand(prop->key()->AsLiteral()->value()));
       __ pop(r1);
       Handle<Code> ic = is_classic_mode()
           ? isolate()->builtins()->StoreIC_Initialize()
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 3d421c1..cc78151 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -321,7 +321,8 @@
   __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask));
   __ b(eq, index_string);
 
-  // Is the string internalized?
+  // Is the string internalized? We know it's a string, so a single
+  // bit test is enough.
   // map: key map
   __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
   STATIC_ASSERT(kInternalizedTag != 0);
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index b421f8a..c28b641 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -41,24 +41,6 @@
 LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
 #undef DEFINE_COMPILE
 
-LOsrEntry::LOsrEntry() {
-  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
-    register_spills_[i] = NULL;
-  }
-  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
-    double_register_spills_[i] = NULL;
-  }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
-                                    LOperand* spill_operand) {
-  ASSERT(spill_operand->IsStackSlot());
-  ASSERT(register_spills_[allocation_index] == NULL);
-  register_spills_[allocation_index] = spill_operand;
-}
-
-
 #ifdef DEBUG
 void LInstruction::VerifyCall() {
   // Call instructions can use only fixed registers as temporaries and
@@ -81,14 +63,6 @@
 #endif
 
 
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
-                                          LOperand* spill_operand) {
-  ASSERT(spill_operand->IsDoubleStackSlot());
-  ASSERT(double_register_spills_[allocation_index] == NULL);
-  double_register_spills_[allocation_index] = spill_operand;
-}
-
-
 void LInstruction::PrintTo(StringStream* stream) {
   stream->Add("%s ", this->Mnemonic());
 
@@ -352,8 +326,7 @@
   constructor()->PrintTo(stream);
   stream->Add(" #%d / ", arity());
   ASSERT(hydrogen()->property_cell()->value()->IsSmi());
-  ElementsKind kind = static_cast<ElementsKind>(
-      Smi::cast(hydrogen()->property_cell()->value())->value());
+  ElementsKind kind = hydrogen()->elements_kind();
   stream->Add(" (%s) ", ElementsKindToString(kind));
 }
 
@@ -451,7 +424,7 @@
 LPlatformChunk* LChunkBuilder::Build() {
   ASSERT(is_unused());
   chunk_ = new(zone()) LPlatformChunk(info(), graph());
-  HPhase phase("L_Building chunk", chunk_);
+  LPhase phase("L_Building chunk", chunk_);
   status_ = BUILDING;
   const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
   for (int i = 0; i < blocks->length(); i++) {
@@ -999,10 +972,13 @@
 
   LBranch* result = new(zone()) LBranch(UseRegister(value));
   // Tagged values that are not known smis or booleans require a
-  // deoptimization environment.
+  // deoptimization environment. If the instruction is generic, no
+  // environment is needed because all cases are handled.
   Representation rep = value->representation();
   HType type = value->type();
-  if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
+  ToBooleanStub::Types expected = instr->expected_input_types();
+  if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() &&
+      !expected.IsGeneric()) {
     return AssignEnvironment(result);
   }
   return result;
@@ -1460,7 +1436,7 @@
               instr->CheckFlag(HValue::kBailoutOnMinusZero))
           ? AssignEnvironment(result)
           : result;
-    } else if (instr->has_fixed_right_arg()) {
+    } else if (instr->fixed_right_arg().has_value) {
       LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
                                      UseRegisterAtStart(right));
       return AssignEnvironment(DefineAsRegister(mod));
@@ -2023,7 +1999,7 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
   return AssignEnvironment(new(zone()) LCheckNonSmi(value));
 }
@@ -2420,6 +2396,14 @@
 }
 
 
+LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
+  info()->MarkAsDeferredCalling();
+  LAllocateObject* result =
+      new(zone()) LAllocateObject(TempRegister(), TempRegister());
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
 LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
   info()->MarkAsDeferredCalling();
   LOperand* size = instr->size()->IsConstant()
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 8eff338..f34b623 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -49,6 +49,7 @@
 #define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
   V(AccessArgumentsAt)                          \
   V(AddI)                                       \
+  V(AllocateObject)                             \
   V(Allocate)                                   \
   V(ApplyArguments)                             \
   V(ArgumentsElements)                          \
@@ -489,17 +490,44 @@
 template<int I, int T>
 class LControlInstruction: public LTemplateInstruction<0, I, T> {
  public:
+  LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
   virtual bool IsControl() const { return true; }
 
   int SuccessorCount() { return hydrogen()->SuccessorCount(); }
   HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
-  int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
-  int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
+
+  int TrueDestination(LChunk* chunk) {
+    return chunk->LookupDestination(true_block_id());
+  }
+  int FalseDestination(LChunk* chunk) {
+    return chunk->LookupDestination(false_block_id());
+  }
+
+  Label* TrueLabel(LChunk* chunk) {
+    if (true_label_ == NULL) {
+      true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+    }
+    return true_label_;
+  }
+  Label* FalseLabel(LChunk* chunk) {
+    if (false_label_ == NULL) {
+      false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+    }
+    return false_label_;
+  }
+
+ protected:
+  int true_block_id() { return SuccessorAt(0)->block_id(); }
+  int false_block_id() { return SuccessorAt(1)->block_id(); }
 
  private:
   HControlInstruction* hydrogen() {
     return HControlInstruction::cast(this->hydrogen_value());
   }
+
+  Label* false_label_;
+  Label* true_label_;
 };
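
TrueLabel and FalseLabel memoize the chunk lookup, so repeated queries during code generation do not redo LookupDestination. The caching pattern in isolation; the types are stand-ins, not the Lithium classes:

    #include <cassert>
    #include <cstddef>

    struct Label { int pos; };

    struct Chunk {
      Label labels[2];
      Label* GetAssemblyLabel(int i) { return &labels[i]; }
    };

    class ControlInstruction {
     public:
      ControlInstruction() : true_label_(NULL) {}
      Label* TrueLabel(Chunk* chunk) {
        if (true_label_ == NULL) {              // first call: do the lookup
          true_label_ = chunk->GetAssemblyLabel(0);
        }
        return true_label_;                     // later calls: cached result
      }
     private:
      Label* true_label_;
    };

    int main() {
      Chunk chunk;
      ControlInstruction instr;
      assert(instr.TrueLabel(&chunk) == instr.TrueLabel(&chunk));
      return 0;
    }
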
 
 
@@ -1236,7 +1264,7 @@
 };
 
 
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
+class LCmpMapAndBranch: public LControlInstruction<1, 1> {
  public:
   LCmpMapAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1249,15 +1277,7 @@
   DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(CompareMap)
 
-  virtual bool IsControl() const { return true; }
-
   Handle<Map> map() const { return hydrogen()->map(); }
-  int true_block_id() const {
-    return hydrogen()->FirstSuccessor()->block_id();
-  }
-  int false_block_id() const {
-    return hydrogen()->SecondSuccessor()->block_id();
-  }
 };
 
 
@@ -2401,6 +2421,7 @@
   LOperand* value() { return inputs_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+  DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
 };
 
 
@@ -2444,6 +2465,21 @@
 };
 
 
+class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
+ public:
+  LAllocateObject(LOperand* temp, LOperand* temp2) {
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
+  DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
+};
+
+
 class LAllocate: public LTemplateInstruction<1, 2, 2> {
  public:
   LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
@@ -2546,26 +2582,10 @@
 
 class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
  public:
-  LOsrEntry();
+  LOsrEntry() {}
 
   virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
   DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
-  LOperand** SpilledRegisterArray() { return register_spills_; }
-  LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
-  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
-  void MarkSpilledDoubleRegister(int allocation_index,
-                                 LOperand* spill_operand);
-
- private:
-  // Arrays of spill slot operands for registers with an assigned spill
-  // slot, i.e., that must also be restored to the spill slot on OSR entry.
-  // NULL if the register has no assigned spill slot.  Indexed by allocation
-  // index.
-  LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
-  LOperand* double_register_spills_[
-      DoubleRegister::kMaxNumAllocatableRegisters];
 };
 
 
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index b7c733c..8525095 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -62,7 +62,7 @@
 #define __ masm()->
 
 bool LCodeGen::GenerateCode() {
-  HPhase phase("Z_Code generation", chunk());
+  LPhase phase("Z_Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;
 
@@ -87,7 +87,7 @@
     RegisterDependentCodeForEmbeddedMaps(code);
   }
   PopulateDeoptimizationData(code);
-  info()->CommitDependentMaps(code);
+  info()->CommitDependencies(code);
 }
 
 
@@ -622,27 +622,6 @@
 
   for (int i = 0; i < translation_size; ++i) {
     LOperand* value = environment->values()->at(i);
-    // spilled_registers_ and spilled_double_registers_ are either
-    // both NULL or both set.
-    if (environment->spilled_registers() != NULL && value != NULL) {
-      if (value->IsRegister() &&
-          environment->spilled_registers()[value->index()] != NULL) {
-        translation->MarkDuplicate();
-        AddToTranslation(translation,
-                         environment->spilled_registers()[value->index()],
-                         environment->HasTaggedValueAt(i),
-                         environment->HasUint32ValueAt(i));
-      } else if (
-          value->IsDoubleRegister() &&
-          environment->spilled_double_registers()[value->index()] != NULL) {
-        translation->MarkDuplicate();
-        AddToTranslation(
-            translation,
-            environment->spilled_double_registers()[value->index()],
-            false,
-            false);
-      }
-    }
 
     // TODO(mstarzinger): Introduce marker operands to indicate that this value
     // is not present and must be reconstructed from the deoptimizer. Currently
@@ -652,12 +631,6 @@
       translation->BeginArgumentsObject(arguments_count);
       for (int i = 0; i < arguments_count; ++i) {
         LOperand* value = environment->values()->at(translation_size + i);
-        ASSERT(environment->spilled_registers() == NULL ||
-               !value->IsRegister() ||
-               environment->spilled_registers()[value->index()] == NULL);
-        ASSERT(environment->spilled_registers() == NULL ||
-               !value->IsDoubleRegister() ||
-               environment->spilled_double_registers()[value->index()] == NULL);
         AddToTranslation(translation,
                          value,
                          environment->HasTaggedValueAt(translation_size + i),
@@ -1120,7 +1093,8 @@
 
 
 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
-  // Nothing to do.
+  // Record the address of the first unknown OSR value as the place to enter.
+  if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
 }
 
 
@@ -1154,12 +1128,12 @@
     __ and_(result_reg, left_reg, Operand(divisor - 1));
     __ bind(&done);
 
-  } else if (hmod->has_fixed_right_arg()) {
+  } else if (hmod->fixed_right_arg().has_value) {
     Register left_reg = ToRegister(instr->left());
     Register right_reg = ToRegister(instr->right());
     Register result_reg = ToRegister(instr->result());
 
-    int32_t divisor = hmod->fixed_right_arg_value();
+    int32_t divisor = hmod->fixed_right_arg().value;
     ASSERT(IsPowerOf2(divisor));
 
     // Check if our assumption of a fixed right operand still holds.
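
The fast path guarded here relies on the identity x % d == x & (d - 1) for non-negative x and power-of-two d, which is what the and_ earlier in this function computes. A quick check of the identity:

    #include <cassert>

    int main() {
      const int divisor = 8;  // IsPowerOf2(divisor) holds
      for (int x = 0; x < 100; ++x) {
        assert(x % divisor == (x & (divisor - 1)));
      }
      return 0;
    }
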
@@ -1906,10 +1880,12 @@
   Register map = ToRegister(instr->temp());
   Label done;
 
-  // If the object is a smi return the object.
-  __ SmiTst(input);
-  __ Move(result, input, eq);
-  __ b(eq, &done);
+  if (!instr->hydrogen()->value()->IsHeapObject()) {
+    // If the object is a smi return the object.
+    __ SmiTst(input);
+    __ Move(result, input, eq);
+    __ b(eq, &done);
+  }
 
   // If the object is not a value type, return the object.
   __ CompareObjectType(input, map, map, JS_VALUE_TYPE);
@@ -2159,11 +2135,12 @@
   return -1;
 }
 
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
+  int right_block = instr->FalseDestination(chunk_);
+  int left_block = instr->TrueDestination(chunk_);
 
-void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
   int next_block = GetNextEmittedBlock();
-  right_block = chunk_->LookupDestination(right_block);
-  left_block = chunk_->LookupDestination(left_block);
 
   if (right_block == left_block) {
     EmitGoto(left_block);
@@ -2184,22 +2161,19 @@
 
 
 void LCodeGen::DoBranch(LBranch* instr) {
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
   Representation r = instr->hydrogen()->value()->representation();
   if (r.IsInteger32() || r.IsSmi()) {
     ASSERT(!info()->IsStub());
     Register reg = ToRegister(instr->value());
     __ cmp(reg, Operand::Zero());
-    EmitBranch(true_block, false_block, ne);
+    EmitBranch(instr, ne);
   } else if (r.IsDouble()) {
     ASSERT(!info()->IsStub());
     DwVfpRegister reg = ToDoubleRegister(instr->value());
     // Test the double value. Zero and NaN are false.
     __ VFPCompareAndSetFlags(reg, 0.0);
-    __ cmp(r0, r0, vs);  // If NaN, set the Z flag.
-    EmitBranch(true_block, false_block, ne);
+    __ cmp(r0, r0, vs);  // If NaN, set the Z flag. (NaN -> false)
+    EmitBranch(instr, ne);
   } else {
     ASSERT(r.IsTagged());
     Register reg = ToRegister(instr->value());
@@ -2207,42 +2181,55 @@
     if (type.IsBoolean()) {
       ASSERT(!info()->IsStub());
       __ CompareRoot(reg, Heap::kTrueValueRootIndex);
-      EmitBranch(true_block, false_block, eq);
+      EmitBranch(instr, eq);
     } else if (type.IsSmi()) {
       ASSERT(!info()->IsStub());
       __ cmp(reg, Operand::Zero());
-      EmitBranch(true_block, false_block, ne);
+      EmitBranch(instr, ne);
+    } else if (type.IsJSArray()) {
+      ASSERT(!info()->IsStub());
+      EmitBranch(instr, al);
+    } else if (type.IsHeapNumber()) {
+      ASSERT(!info()->IsStub());
+      DwVfpRegister dbl_scratch = double_scratch0();
+      __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+      // Test the double value. Zero and NaN are false.
+      __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
+      __ cmp(r0, r0, vs);  // If NaN, set the Z flag. (NaN -> false)
+      EmitBranch(instr, ne);
+    } else if (type.IsString()) {
+      ASSERT(!info()->IsStub());
+      __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
+      __ cmp(ip, Operand::Zero());
+      EmitBranch(instr, ne);
     } else {
-      Label* true_label = chunk_->GetAssemblyLabel(true_block);
-      Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
       ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
       // Avoid deopts in the case where we've never executed this path before.
-      if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
+      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
 
       if (expected.Contains(ToBooleanStub::UNDEFINED)) {
         // undefined -> false.
         __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
-        __ b(eq, false_label);
+        __ b(eq, instr->FalseLabel(chunk_));
       }
       if (expected.Contains(ToBooleanStub::BOOLEAN)) {
         // Boolean -> its value.
         __ CompareRoot(reg, Heap::kTrueValueRootIndex);
-        __ b(eq, true_label);
+        __ b(eq, instr->TrueLabel(chunk_));
         __ CompareRoot(reg, Heap::kFalseValueRootIndex);
-        __ b(eq, false_label);
+        __ b(eq, instr->FalseLabel(chunk_));
       }
       if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
         // 'null' -> false.
         __ CompareRoot(reg, Heap::kNullValueRootIndex);
-        __ b(eq, false_label);
+        __ b(eq, instr->FalseLabel(chunk_));
       }
 
       if (expected.Contains(ToBooleanStub::SMI)) {
         // Smis: 0 -> false, all other -> true.
         __ cmp(reg, Operand::Zero());
-        __ b(eq, false_label);
-        __ JumpIfSmi(reg, true_label);
+        __ b(eq, instr->FalseLabel(chunk_));
+        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
       } else if (expected.NeedsMap()) {
         // If we need a map later and have a Smi -> deopt.
         __ SmiTst(reg);
@@ -2257,14 +2244,14 @@
           // Undetectable -> false.
           __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
           __ tst(ip, Operand(1 << Map::kIsUndetectable));
-          __ b(ne, false_label);
+          __ b(ne, instr->FalseLabel(chunk_));
         }
       }
 
       if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
         // spec object -> true.
         __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
-        __ b(ge, true_label);
+        __ b(ge, instr->TrueLabel(chunk_));
       }
 
       if (expected.Contains(ToBooleanStub::STRING)) {
@@ -2274,15 +2261,15 @@
         __ b(ge, &not_string);
         __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
         __ cmp(ip, Operand::Zero());
-        __ b(ne, true_label);
-        __ b(false_label);
+        __ b(ne, instr->TrueLabel(chunk_));
+        __ b(instr->FalseLabel(chunk_));
         __ bind(&not_string);
       }
 
       if (expected.Contains(ToBooleanStub::SYMBOL)) {
         // Symbol value -> true.
         __ CompareInstanceType(map, ip, SYMBOL_TYPE);
-        __ b(eq, true_label);
+        __ b(eq, instr->TrueLabel(chunk_));
       }
 
       if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
@@ -2294,13 +2281,16 @@
         __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
         __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
         __ cmp(r0, r0, vs);  // NaN -> false.
-        __ b(eq, false_label);  // +0, -0 -> false.
-        __ b(true_label);
+        __ b(eq, instr->FalseLabel(chunk_));  // +0, -0 -> false.
+        __ b(instr->TrueLabel(chunk_));
         __ bind(&not_heap_number);
       }
 
-      // We've seen something for the first time -> deopt.
-      DeoptimizeIf(al, instr->environment());
+      if (!expected.IsGeneric()) {
+        // We've seen something for the first time -> deopt.
+        // This can only happen if we are not generic already.
+        DeoptimizeIf(al, instr->environment());
+      }
     }
   }
 }
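
The branch chain above implements ECMAScript ToBoolean over the types the compiler has seen: undefined, null, false, plus and minus zero, NaN, and the empty string map to false, everything else to true. A scalar sketch of the numeric and string cases the generated code tests:

    #include <cassert>
    #include <cmath>
    #include <string>

    bool ToBooleanNumber(double d) {
      return d != 0.0 && !std::isnan(d);  // +0, -0 and NaN -> false
    }

    bool ToBooleanString(const std::string& s) {
      return !s.empty();  // a length test, like the stub's String case
    }

    int main() {
      assert(!ToBooleanNumber(0.0) && !ToBooleanNumber(-0.0));
      assert(!ToBooleanNumber(std::nan("")));
      assert(ToBooleanNumber(1.5));
      assert(!ToBooleanString("") && ToBooleanString("x"));
      return 0;
    }
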
@@ -2308,7 +2298,7 @@
 
 void LCodeGen::EmitGoto(int block) {
   if (!IsNextEmittedBlock(block)) {
-    __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
+    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
   }
 }
 
@@ -2349,17 +2339,14 @@
 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
   LOperand* left = instr->left();
   LOperand* right = instr->right();
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
   Condition cond = TokenToCondition(instr->op(), false);
 
   if (left->IsConstantOperand() && right->IsConstantOperand()) {
     // We can statically evaluate the comparison.
     double left_val = ToDouble(LConstantOperand::cast(left));
     double right_val = ToDouble(LConstantOperand::cast(right));
-    int next_block =
-      EvalComparison(instr->op(), left_val, right_val) ? true_block
-                                                       : false_block;
+    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
     EmitGoto(next_block);
   } else {
     if (instr->is_double()) {
@@ -2368,7 +2355,7 @@
       __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
       // If a NaN is involved, i.e. the result is unordered (V set),
       // jump to false block label.
-      __ b(vs, chunk_->GetAssemblyLabel(false_block));
+      __ b(vs, instr->FalseLabel(chunk_));
     } else {
       if (right->IsConstantOperand()) {
         int32_t value = ToInteger32(LConstantOperand::cast(right));
@@ -2390,7 +2377,7 @@
         __ cmp(ToRegister(left), ToRegister(right));
       }
     }
-    EmitBranch(true_block, false_block, cond);
+    EmitBranch(instr, cond);
   }
 }
 
@@ -2398,21 +2385,17 @@
 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
   Register left = ToRegister(instr->left());
   Register right = ToRegister(instr->right());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
 
   __ cmp(left, Operand(right));
-  EmitBranch(true_block, false_block, eq);
+  EmitBranch(instr, eq);
 }
 
 
 void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
   Register left = ToRegister(instr->left());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
 
   __ cmp(left, Operand(instr->hydrogen()->right()));
-  EmitBranch(true_block, false_block, eq);
+  EmitBranch(instr, eq);
 }
 
 
@@ -2447,22 +2430,21 @@
   Register reg = ToRegister(instr->value());
   Register temp1 = ToRegister(instr->temp());
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  Label* true_label = chunk_->GetAssemblyLabel(true_block);
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
   Condition true_cond =
-      EmitIsObject(reg, temp1, false_label, true_label);
+      EmitIsObject(reg, temp1,
+          instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
 
-  EmitBranch(true_block, false_block, true_cond);
+  EmitBranch(instr, true_cond);
 }
 
 
 Condition LCodeGen::EmitIsString(Register input,
                                  Register temp1,
-                                 Label* is_not_string) {
-  __ JumpIfSmi(input, is_not_string);
+                                 Label* is_not_string,
+                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
+  if (check_needed == INLINE_SMI_CHECK) {
+    __ JumpIfSmi(input, is_not_string);
+  }
   __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
 
   return lt;
@@ -2473,24 +2455,20 @@
   Register reg = ToRegister(instr->value());
   Register temp1 = ToRegister(instr->temp());
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
+  SmiCheck check_needed =
+      instr->hydrogen()->value()->IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   Condition true_cond =
-      EmitIsString(reg, temp1, false_label);
+      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
 
-  EmitBranch(true_block, false_block, true_cond);
+  EmitBranch(instr, true_cond);
 }
 
 
 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
   Register input_reg = EmitLoadRegister(instr->value(), ip);
   __ SmiTst(input_reg);
-  EmitBranch(true_block, false_block, eq);
+  EmitBranch(instr, eq);
 }
 
 
@@ -2498,14 +2476,13 @@
   Register input = ToRegister(instr->value());
   Register temp = ToRegister(instr->temp());
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+  if (!instr->hydrogen()->value()->IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
   __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
   __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
   __ tst(temp, Operand(1 << Map::kIsUndetectable));
-  EmitBranch(true_block, false_block, ne);
+  EmitBranch(instr, ne);
 }
 
 
@@ -2531,8 +2508,6 @@
 
 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
   Token::Value op = instr->op();
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
 
   Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -2541,7 +2516,7 @@
 
   Condition condition = ComputeCompareCondition(op);
 
-  EmitBranch(true_block, false_block, condition);
+  EmitBranch(instr, condition);
 }
 
 
@@ -2569,15 +2544,12 @@
   Register scratch = scratch0();
   Register input = ToRegister(instr->value());
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
-  __ JumpIfSmi(input, false_label);
+  if (!instr->hydrogen()->value()->IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
 
   __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
-  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
+  EmitBranch(instr, BranchCondition(instr->hydrogen()));
 }
 
 
@@ -2597,13 +2569,10 @@
   Register input = ToRegister(instr->value());
   Register scratch = scratch0();
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
   __ ldr(scratch,
          FieldMemOperand(input, String::kHashFieldOffset));
   __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
-  EmitBranch(true_block, false_block, eq);
+  EmitBranch(instr, eq);
 }
 
 
@@ -2680,27 +2649,20 @@
   Register temp2 = ToRegister(instr->temp());
   Handle<String> class_name = instr->hydrogen()->class_name();
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+      class_name, input, temp, temp2);
 
-  Label* true_label = chunk_->GetAssemblyLabel(true_block);
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
-  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
-  EmitBranch(true_block, false_block, eq);
+  EmitBranch(instr, eq);
 }
 
 
 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
   Register reg = ToRegister(instr->value());
   Register temp = ToRegister(instr->temp());
-  int true_block = instr->true_block_id();
-  int false_block = instr->false_block_id();
 
   __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
   __ cmp(temp, Operand(instr->map()));
-  EmitBranch(true_block, false_block, eq);
+  EmitBranch(instr, eq);
 }
 
 
@@ -3019,9 +2981,9 @@
 
   __ str(value, target);
   if (instr->hydrogen()->NeedsWriteBarrier()) {
-    HType type = instr->hydrogen()->value()->type();
     SmiCheck check_needed =
-        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+        instr->hydrogen()->value()->IsHeapObject()
+            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     __ RecordWriteContextSlot(context,
                               target.offset(),
                               value,
@@ -4152,12 +4114,9 @@
   ASSERT(ToRegister(instr->result()).is(r0));
 
   __ mov(r0, Operand(instr->arity()));
-  if (FLAG_optimize_constructed_arrays) {
-    // No cell in r2 for construct type feedback in optimized code
-    Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
-                                   isolate());
-    __ mov(r2, Operand(undefined_value));
-  }
+  // No cell in r2 for construct type feedback in optimized code
+  Handle<Object> undefined_value(isolate()->factory()->undefined_value());
+  __ mov(r2, Operand(undefined_value));
   CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
   CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
 }
@@ -4166,16 +4125,18 @@
 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   ASSERT(ToRegister(instr->constructor()).is(r1));
   ASSERT(ToRegister(instr->result()).is(r0));
-  ASSERT(FLAG_optimize_constructed_arrays);
 
   __ mov(r0, Operand(instr->arity()));
   __ mov(r2, Operand(instr->hydrogen()->property_cell()));
   ElementsKind kind = instr->hydrogen()->elements_kind();
-  bool disable_allocation_sites =
-      (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE);
+  AllocationSiteOverrideMode override_mode =
+      (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE)
+          ? DISABLE_ALLOCATION_SITES
+          : DONT_OVERRIDE;
+  ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
 
   if (instr->arity() == 0) {
-    ArrayNoArgumentConstructorStub stub(kind, disable_allocation_sites);
+    ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   } else if (instr->arity() == 1) {
     Label done;
@@ -4188,18 +4149,18 @@
       __ b(eq, &packed_case);
 
       ElementsKind holey_kind = GetHoleyElementsKind(kind);
-      ArraySingleArgumentConstructorStub stub(holey_kind,
-                                              disable_allocation_sites);
+      ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
+                                              override_mode);
       CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
       __ jmp(&done);
       __ bind(&packed_case);
     }
 
-    ArraySingleArgumentConstructorStub stub(kind, disable_allocation_sites);
+    ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
     __ bind(&done);
   } else {
-    ArrayNArgumentsConstructorStub stub(kind, disable_allocation_sites);
+    ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   }
 }
@@ -4263,9 +4224,9 @@
   // Do the store.
   Register value = ToRegister(instr->value());
   ASSERT(!object.is(value));
-  HType type = instr->hydrogen()->value()->type();
   SmiCheck check_needed =
-      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+      instr->hydrogen()->value()->IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   if (access.IsInobject()) {
     __ str(value, FieldMemOperand(object, offset));
     if (instr->hydrogen()->NeedsWriteBarrier()) {
@@ -4474,9 +4435,9 @@
   __ str(value, FieldMemOperand(store_base, offset));
 
   if (instr->hydrogen()->NeedsWriteBarrier()) {
-    HType type = instr->hydrogen()->value()->type();
     SmiCheck check_needed =
-        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+        instr->hydrogen()->value()->IsHeapObject()
+            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
     __ add(key, store_base, Operand(offset - kHeapObjectTag));
     __ RecordWrite(elements,
@@ -5198,9 +5159,11 @@
 
 
 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
-  LOperand* input = instr->value();
-  __ SmiTst(ToRegister(input));
-  DeoptimizeIf(eq, instr->environment());
+  if (!instr->hydrogen()->value()->IsHeapObject()) {
+    LOperand* input = instr->value();
+    __ SmiTst(ToRegister(input));
+    DeoptimizeIf(eq, instr->environment());
+  }
 }
 
 
@@ -5365,6 +5328,80 @@
 }
 
 
+void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
+  class DeferredAllocateObject: public LDeferredCode {
+   public:
+    DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LAllocateObject* instr_;
+  };
+
+  DeferredAllocateObject* deferred =
+      new(zone()) DeferredAllocateObject(this, instr);
+
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->temp());
+  Register scratch2 = ToRegister(instr->temp2());
+  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+  Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
+  int instance_size = initial_map->instance_size();
+  ASSERT(initial_map->pre_allocated_property_fields() +
+         initial_map->unused_property_fields() -
+         initial_map->inobject_properties() == 0);
+
+  __ Allocate(instance_size, result, scratch, scratch2, deferred->entry(),
+              TAG_OBJECT);
+
+  __ bind(deferred->exit());
+  if (FLAG_debug_code) {
+    Label is_in_new_space;
+    __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
+    __ Abort("Allocated object is not in new-space");
+    __ bind(&is_in_new_space);
+  }
+
+  // Load the initial map.
+  Register map = scratch;
+  __ LoadHeapObject(map, constructor);
+  __ ldr(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Initialize map and fields of the newly allocated object.
+  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
+  __ str(map, FieldMemOperand(result, JSObject::kMapOffset));
+  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
+  __ str(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
+  __ str(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
+  if (initial_map->inobject_properties() != 0) {
+    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+    for (int i = 0; i < initial_map->inobject_properties(); i++) {
+      int property_offset = JSObject::kHeaderSize + i * kPointerSize;
+      __ str(scratch, FieldMemOperand(result, property_offset));
+    }
+  }
+}
+
+
+void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
+  Register result = ToRegister(instr->result());
+  Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
+  int instance_size = initial_map->instance_size();
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, Operand::Zero());
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ mov(r0, Operand(Smi::FromInt(instance_size)));
+  __ push(r0);
+  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+  __ StoreToSafepointRegisterSlot(r0, result);
+}
+
+
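
DoAllocateObject above tries an inline new-space allocation and only reaches the deferred code when that fails; the deferred path zeroes the result register first because it already appears in the pointer map and must hold a GC-safe value across the runtime call. The overall fast-path/slow-path shape, reduced to a bump-allocator sketch in which malloc stands in for the runtime:

    #include <cstddef>
    #include <cstdlib>

    // Slow path: what CallRuntimeFromDeferred reaches in the real code.
    void* AllocateViaRuntime(size_t size) { return std::malloc(size); }

    // Fast path: bump allocation against a new-space limit.
    void* Allocate(char** top, char* limit, size_t size) {
      if (*top + size <= limit) {
        void* result = *top;
        *top += size;
        return result;                  // inline success, no runtime call
      }
      return AllocateViaRuntime(size);  // deferred fallback
    }

    int main() {
      char space[64];
      char* top = space;
      void* fast = Allocate(&top, space + sizeof(space), 16);   // inline
      void* slow = Allocate(&top, space + sizeof(space), 128);  // runtime
      std::free(slow);  // only the runtime-path result owns heap memory
      (void)fast;
      return 0;
    }
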
 void LCodeGen::DoAllocate(LAllocate* instr) {
   class DeferredAllocate: public LDeferredCode {
    public:
@@ -5523,17 +5560,13 @@
 
 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
   Register input = ToRegister(instr->value());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  Label* true_label = chunk_->GetAssemblyLabel(true_block);
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
-  Condition final_branch_condition = EmitTypeofIs(true_label,
-                                                  false_label,
+  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
+                                                  instr->FalseLabel(chunk_),
                                                   input,
                                                   instr->type_literal());
   if (final_branch_condition != kNoCondition) {
-    EmitBranch(true_block, false_block, final_branch_condition);
+    EmitBranch(instr, final_branch_condition);
   }
 }
 
@@ -5618,11 +5651,9 @@
 
 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
   Register temp1 = ToRegister(instr->temp());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
 
   EmitIsConstructCall(temp1, scratch0());
-  EmitBranch(true_block, false_block, eq);
+  EmitBranch(instr, eq);
 }
 
 
@@ -5778,15 +5809,15 @@
   // properly registered for deoptimization and records the assembler's PC
   // offset.
   LEnvironment* environment = instr->environment();
-  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
-                                   instr->SpilledDoubleRegisterArray());
 
   // If the environment were already registered, we would have no way of
   // backpatching it with the spill slot operands.
   ASSERT(!environment->HasBeenRegistered());
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-  ASSERT(osr_pc_offset_ == -1);
-  osr_pc_offset_ = masm()->pc_offset();
+
+  // Normally we record the first unknown OSR value as the entrypoint to the OSR
+  // code, but if there were none, record the entrypoint here.
+  if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
 }
 
 
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index aeba5ea..075fb41 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -79,7 +79,6 @@
   Heap* heap() const { return isolate()->heap(); }
   Zone* zone() const { return zone_; }
 
-  // TODO(svenpanne) Use this consistently.
   int LookupDestination(int block_id) const {
     return chunk()->LookupDestination(block_id);
   }
@@ -151,6 +150,7 @@
   void DoDeferredRandom(LRandom* instr);
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredAllocateObject(LAllocateObject* instr);
   void DoDeferredAllocate(LAllocate* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
@@ -319,7 +319,8 @@
 
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
-  void EmitBranch(int left_block, int right_block, Condition cc);
+  template<class InstrType>
+  void EmitBranch(InstrType instr, Condition cc);
   void EmitNumberUntagD(Register input,
                         DwVfpRegister result,
                         bool allow_undefined_as_nan,
@@ -348,7 +349,8 @@
   // true and false label should be made, to optimize fallthrough.
   Condition EmitIsString(Register input,
                          Register temp1,
-                         Label* is_not_string);
+                         Label* is_not_string,
+                         SmiCheck check_needed);
 
   // Emits optimized code for %_IsConstructCall().
   // Caller should branch on equal condition.
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 81a2d37..fb379e2 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -3089,6 +3089,16 @@
 }
 
 
+void MacroAssembler::JumpIfNotUniqueName(Register reg,
+                                         Label* not_unique_name) {
+  STATIC_ASSERT(((SYMBOL_TYPE - 1) & kIsInternalizedMask) == kInternalizedTag);
+  cmp(reg, Operand(kInternalizedTag));
+  b(lt, not_unique_name);
+  cmp(reg, Operand(SYMBOL_TYPE));
+  b(gt, not_unique_name);
+}
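
JumpIfNotUniqueName leans on the layout the STATIC_ASSERT pins down: internalized-string instance types and SYMBOL_TYPE form a contiguous range, so "is a unique name" reduces to one two-sided range check. A sketch with placeholder type numbers, not V8's real encoding:

    #include <cassert>

    // Placeholder instance-type values; only their ordering matters here.
    const int kInternalizedTag = 0x40;  // assumed lower bound of the range
    const int SYMBOL_TYPE      = 0x41;  // assumed upper bound of the range

    // Mirrors the cmp/b(lt)/cmp/b(gt) sequence above.
    bool IsUniqueName(int instance_type) {
      return instance_type >= kInternalizedTag && instance_type <= SYMBOL_TYPE;
    }

    int main() {
      assert(IsUniqueName(kInternalizedTag));  // internalized string
      assert(IsUniqueName(SYMBOL_TYPE));       // symbol
      assert(!IsUniqueName(0x00));             // ordinary string: not unique
      return 0;
    }
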
+
+
 // Allocates a heap number or jumps to the need_gc label if the young space
 // is full and a scavenge is needed.
 void MacroAssembler::AllocateHeapNumber(Register result,
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 8d3626d..b76ebd5 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -1296,6 +1296,7 @@
                                               Register scratch,
                                               Label* failure);
 
+  void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
 
   // ---------------------------------------------------------------------------
   // Patching helpers.
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index decfa74..21cb802 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1698,12 +1698,61 @@
 }
 
 
+Handle<Code> CallStubCompiler::CompileArrayCodeCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<Cell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name,
+    Code::StubType type) {
+  Label miss;
+
+  // Check that the function is still the Array function.
+  const int argc = arguments().immediate();
+  GenerateNameCheck(name, &miss);
+  Register receiver = r1;
+
+  if (cell.is_null()) {
+    __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
+
+    // Check that the receiver isn't a smi.
+    __ JumpIfSmi(receiver, &miss);
+
+    // Check that the maps haven't changed.
+    CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0,
+                    r4, name, &miss);
+  } else {
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
+    GenerateLoadFunctionFromCell(cell, function, &miss);
+  }
+
+  Handle<Smi> kind(Smi::FromInt(GetInitialFastElementsKind()), isolate());
+  Handle<Cell> kind_feedback_cell = isolate()->factory()->NewCell(kind);
+  __ mov(r0, Operand(argc));
+  __ mov(r2, Operand(kind_feedback_cell));
+  __ mov(r1, Operand(function));
+
+  ArrayConstructorStub stub(isolate());
+  __ TailCallStub(&stub);
+
+  __ bind(&miss);
+  GenerateMissBranch();
+
+  // Return the generated code.
+  return GetCode(type, name);
+}
+
+
 Handle<Code> CallStubCompiler::CompileArrayPushCall(
     Handle<Object> object,
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -1947,7 +1996,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return GetCode(function);
+  return GetCode(type, name);
 }
 
 
@@ -1956,7 +2005,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -2029,7 +2079,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return GetCode(function);
+  return GetCode(type, name);
 }
 
 
@@ -2038,7 +2088,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -2111,7 +2162,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return GetCode(function);
+  return GetCode(type, name);
 }
 
 
@@ -2120,7 +2171,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -2194,7 +2246,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return GetCode(function);
+  return GetCode(type, name);
 }
 
 
@@ -2203,7 +2255,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -2266,7 +2319,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+  return GetCode(type, name);
 }
 
 
@@ -2275,7 +2328,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -2374,7 +2428,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+  return GetCode(type, name);
 }
 
 
@@ -2383,7 +2437,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -2472,7 +2527,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+  return GetCode(type, name);
 }
 
 
@@ -2653,7 +2708,8 @@
   if (HasCustomCallGenerator(function)) {
     Handle<Code> code = CompileCustomCall(object, holder,
                                           Handle<Cell>::null(),
-                                          function, Handle<String>::cast(name));
+                                          function, Handle<String>::cast(name),
+                                          Code::CONSTANT_FUNCTION);
     // A null handle means bail out to the regular compiler code below.
     if (!code.is_null()) return code;
   }
@@ -2719,7 +2775,8 @@
   // -----------------------------------
   if (HasCustomCallGenerator(function)) {
     Handle<Code> code = CompileCustomCall(
-        object, holder, cell, function, Handle<String>::cast(name));
+        object, holder, cell, function, Handle<String>::cast(name),
+        Code::NORMAL);
     // A null handle means bail out to the regular compiler code below.
     if (!code.is_null()) return code;
   }
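Both call sites above now thread a Code::StubType through to the custom-call generators, which is why every Compile*Call in this file grew a type parameter and the ad-hoc returns (GetCode(function) versus GetCode(Code::NORMAL, name)) collapse into GetCode(type, name). A sketch of the shared shape, reconstructed from the calls above rather than quoted from the header:

    Handle<Code> CallStubCompiler::CompileCustomCall(
        Handle<Object> object, Handle<JSObject> holder, Handle<Cell> cell,
        Handle<JSFunction> function, Handle<String> name,
        Code::StubType type);  // CONSTANT_FUNCTION for direct calls,
                               // NORMAL for global-cell calls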
diff --git a/src/arraybuffer.js b/src/arraybuffer.js
index 2b0c3dd..4a4f570 100644
--- a/src/arraybuffer.js
+++ b/src/arraybuffer.js
@@ -31,12 +31,12 @@
 
 // -------------------------------------------------------------------
 
-function ArrayBufferConstructor(byteLength) { // length = 1
+function ArrayBufferConstructor(length) { // length = 1
   if (%_IsConstructCall()) {
-    var l = TO_POSITIVE_INTEGER(byteLength);
-    %ArrayBufferInitialize(this, l);
+    var byteLength = ToPositiveInteger(length, 'invalid_array_buffer_length');
+    %ArrayBufferInitialize(this, byteLength);
   } else {
-    return new $ArrayBuffer(byteLength);
+    throw MakeTypeError('constructor_not_function', ["ArrayBuffer"]);
   }
 }
 
@@ -70,6 +70,9 @@
     fin = MathMin(relativeEnd, this.byteLength);
   }
 
+  if (fin < first) {
+    fin = first;
+  }
   var newLen = fin - first;
   // TODO(dslomov): implement inheritance
   var result = new $ArrayBuffer(newLen);
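The new clamp makes a backwards range produce an empty buffer instead of a negative length. A minimal sketch of the resulting semantics (hypothetical helper, not part of the patch):

    // slice(8, 4) on a 10-byte buffer: first = 8, fin = 4, clamped to 8,
    // so the result has byteLength 0 rather than a negative length.
    size_t SliceLength(size_t first, size_t fin) {
      if (fin < first) fin = first;
      return fin - first;
    }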
diff --git a/src/ast.cc b/src/ast.cc
index bf607d2..589bd5a 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -57,22 +57,22 @@
 
 
 bool Expression::IsSmiLiteral() {
-  return AsLiteral() != NULL && AsLiteral()->handle()->IsSmi();
+  return AsLiteral() != NULL && AsLiteral()->value()->IsSmi();
 }
 
 
 bool Expression::IsStringLiteral() {
-  return AsLiteral() != NULL && AsLiteral()->handle()->IsString();
+  return AsLiteral() != NULL && AsLiteral()->value()->IsString();
 }
 
 
 bool Expression::IsNullLiteral() {
-  return AsLiteral() != NULL && AsLiteral()->handle()->IsNull();
+  return AsLiteral() != NULL && AsLiteral()->value()->IsNull();
 }
 
 
 bool Expression::IsUndefinedLiteral() {
-  return AsLiteral() != NULL && AsLiteral()->handle()->IsUndefined();
+  return AsLiteral() != NULL && AsLiteral()->value()->IsUndefined();
 }
 
 
@@ -135,6 +135,7 @@
       binary_operation_(NULL),
       assignment_id_(GetNextId(isolate)),
       is_monomorphic_(false),
+      is_uninitialized_(false),
       store_mode_(STANDARD_STORE) { }
 
 
@@ -188,7 +189,7 @@
   emit_store_ = true;
   key_ = key;
   value_ = value;
-  Object* k = *key->handle();
+  Object* k = *key->value();
   if (k->IsInternalizedString() &&
       isolate->heap()->proto_string()->Equals(String::cast(k))) {
     kind_ = PROTOTYPE;
@@ -262,7 +263,7 @@
   for (int i = properties()->length() - 1; i >= 0; i--) {
     ObjectLiteral::Property* property = properties()->at(i);
     Literal* literal = property->key();
-    if (literal->handle()->IsNull()) continue;
+    if (literal->value()->IsNull()) continue;
     uint32_t hash = literal->Hash();
     // If the key of a computed property is in the table, do not emit
     // a store for the property later.
@@ -287,6 +288,16 @@
 }
 
 
+void UnaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
+  // TODO(olivf) If this Operation is used in a test context, then the
+  // expression has a ToBoolean stub and we want to collect the type
+  // information. However, the GraphBuilder expects it to be on the
+  // instruction corresponding to the TestContext, so we have to store it
+  // here and not on the operand.
+  set_to_boolean_types(oracle->ToBooleanTypes(expression()->test_id()));
+}
+
+
 bool UnaryOperation::ResultOverwriteAllowed() {
   switch (op_) {
     case Token::BIT_NOT:
@@ -298,6 +309,16 @@
 }
 
 
+void BinaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
+  // TODO(olivf) If this Operation is used in a test context, then the
+  // right-hand side has a ToBoolean stub and we want to collect the type
+  // information. However, the GraphBuilder expects it to be on the
+  // instruction corresponding to the TestContext, so we have to store it
+  // here and not on the right-hand operand.
+  set_to_boolean_types(oracle->ToBooleanTypes(right()->test_id()));
+}
+
+
 bool BinaryOperation::ResultOverwriteAllowed() {
   switch (op_) {
     case Token::COMMA:
@@ -337,7 +358,7 @@
                                       Handle<String>* check) {
   if (IsTypeof(left) && right->IsStringLiteral() && Token::IsEqualityOp(op)) {
     *expr = left->AsUnaryOperation()->expression();
-    *check = Handle<String>::cast(right->AsLiteral()->handle());
+    *check = Handle<String>::cast(right->AsLiteral()->value());
     return true;
   }
   return false;
@@ -417,6 +438,10 @@
 // ----------------------------------------------------------------------------
 // Recording of type feedback
 
+// TODO(rossberg): all RecordTypeFeedback functions should disappear
+// once we use the common type field in the AST consistently.
+
+
 void ForInStatement::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
   for_in_type_ = static_cast<ForInType>(oracle->ForInType(this));
 }
@@ -444,8 +469,8 @@
       is_function_prototype_ = true;
     } else {
       Literal* lit_key = key()->AsLiteral();
-      ASSERT(lit_key != NULL && lit_key->handle()->IsString());
-      Handle<String> name = Handle<String>::cast(lit_key->handle());
+      ASSERT(lit_key != NULL && lit_key->value()->IsString());
+      Handle<String> name = Handle<String>::cast(lit_key->value());
       oracle->LoadReceiverTypes(this, name, &receiver_types_);
     }
   } else if (oracle->LoadIsBuiltin(this, Builtins::kKeyedLoadIC_String)) {
@@ -465,12 +490,14 @@
   Property* prop = target()->AsProperty();
   ASSERT(prop != NULL);
   TypeFeedbackId id = AssignmentFeedbackId();
+  is_uninitialized_ = oracle->StoreIsUninitialized(id);
+  if (is_uninitialized_) return;
   is_monomorphic_ = oracle->StoreIsMonomorphicNormal(id);
   receiver_types_.Clear();
   if (prop->key()->IsPropertyName()) {
     Literal* lit_key = prop->key()->AsLiteral();
-    ASSERT(lit_key != NULL && lit_key->handle()->IsString());
-    Handle<String> name = Handle<String>::cast(lit_key->handle());
+    ASSERT(lit_key != NULL && lit_key->value()->IsString());
+    Handle<String> name = Handle<String>::cast(lit_key->value());
     oracle->StoreReceiverTypes(this, name, &receiver_types_);
   } else if (is_monomorphic_) {
     // Record receiver type for monomorphic keyed stores.
@@ -612,8 +639,8 @@
   } else {
     // Method call.  Specialize for the receiver types seen at runtime.
     Literal* key = property->key()->AsLiteral();
-    ASSERT(key != NULL && key->handle()->IsString());
-    Handle<String> name = Handle<String>::cast(key->handle());
+    ASSERT(key != NULL && key->value()->IsString());
+    Handle<String> name = Handle<String>::cast(key->value());
     receiver_types_.Clear();
     oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
 #ifdef DEBUG
@@ -662,26 +689,6 @@
 }
 
 
-void UnaryOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
-  type_ = oracle->UnaryType(UnaryOperationFeedbackId());
-}
-
-
-void BinaryOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
-  oracle->BinaryType(BinaryOperationFeedbackId(),
-                     &left_type_, &right_type_, &result_type_,
-                     &has_fixed_right_arg_, &fixed_right_arg_value_);
-}
-
-
-// TODO(rossberg): this function (and all other RecordTypeFeedback functions)
-// should disappear once we use the common type field in the AST consistently.
-void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
-  oracle->CompareTypes(CompareOperationFeedbackId(),
-      &left_type_, &right_type_, &overall_type_, &compare_nil_type_);
-}
-
-
 // ----------------------------------------------------------------------------
 // Implementation of AstVisitor
 
@@ -1165,18 +1172,18 @@
 
 
 Handle<String> Literal::ToString() {
-  if (handle_->IsString()) return Handle<String>::cast(handle_);
+  if (value_->IsString()) return Handle<String>::cast(value_);
   Factory* factory = Isolate::Current()->factory();
-  ASSERT(handle_->IsNumber());
+  ASSERT(value_->IsNumber());
   char arr[100];
   Vector<char> buffer(arr, ARRAY_SIZE(arr));
   const char* str;
-  if (handle_->IsSmi()) {
+  if (value_->IsSmi()) {
     // Optimization only, the heap number case would subsume this.
-    OS::SNPrintF(buffer, "%d", Smi::cast(*handle_)->value());
+    OS::SNPrintF(buffer, "%d", Smi::cast(*value_)->value());
     str = arr;
   } else {
-    str = DoubleToCString(handle_->Number(), buffer);
+    str = DoubleToCString(value_->Number(), buffer);
   }
   return factory->NewStringFromAscii(CStrVector(str));
 }
diff --git a/src/ast.h b/src/ast.h
index b07456b..6336b3a 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -356,9 +356,11 @@
   // True iff the expression is the undefined literal.
   bool IsUndefinedLiteral();
 
-  // Expression type
-  Handle<Type> type() { return type_; }
-  void set_type(Handle<Type> type) { type_ = type; }
+  // Expression type bounds
+  Handle<Type> upper_type() { return upper_type_; }
+  Handle<Type> lower_type() { return lower_type_; }
+  void set_upper_type(Handle<Type> type) { upper_type_ = type; }
+  void set_lower_type(Handle<Type> type) { lower_type_ = type; }
 
   // Type feedback information for assignments and properties.
   virtual bool IsMonomorphic() {
@@ -381,7 +383,7 @@
   }
 
   // TODO(rossberg): this should move to its own AST node eventually.
-  void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
+  virtual void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
   byte to_boolean_types() const { return to_boolean_types_; }
 
   BailoutId id() const { return id_; }
@@ -389,12 +391,15 @@
 
  protected:
   explicit Expression(Isolate* isolate)
-      : type_(Type::None(), isolate),
+      : upper_type_(Type::Any(), isolate),
+        lower_type_(Type::None(), isolate),
         id_(GetNextId(isolate)),
         test_id_(GetNextId(isolate)) {}
+  void set_to_boolean_types(byte types) { to_boolean_types_ = types; }
 
  private:
-  Handle<Type> type_;
+  Handle<Type> upper_type_;
+  Handle<Type> lower_type_;
   byte to_boolean_types_;
 
   const BailoutId id_;
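Expressions now carry an interval of types instead of a single point: upper_type_ starts at Type::Any (nothing known from above) and lower_type_ at Type::None (nothing known from below), and feedback narrows the two toward each other. A hedged sketch of the intended use (assumes types.h provides Type::Smi and Type::Number; the subtype invariant is an assumption, not asserted by this patch):

    // Narrow an expression from both ends; lower_type() should remain a
    // subtype of upper_type().
    expr->set_lower_type(Handle<Type>(Type::Smi(), isolate));
    expr->set_upper_type(Handle<Type>(Type::Number(), isolate));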
@@ -1308,36 +1313,36 @@
   DECLARE_NODE_TYPE(Literal)
 
   virtual bool IsPropertyName() {
-    if (handle_->IsInternalizedString()) {
+    if (value_->IsInternalizedString()) {
       uint32_t ignored;
-      return !String::cast(*handle_)->AsArrayIndex(&ignored);
+      return !String::cast(*value_)->AsArrayIndex(&ignored);
     }
     return false;
   }
 
   Handle<String> AsPropertyName() {
     ASSERT(IsPropertyName());
-    return Handle<String>::cast(handle_);
+    return Handle<String>::cast(value_);
   }
 
-  virtual bool ToBooleanIsTrue() { return handle_->BooleanValue(); }
-  virtual bool ToBooleanIsFalse() { return !handle_->BooleanValue(); }
+  virtual bool ToBooleanIsTrue() { return value_->BooleanValue(); }
+  virtual bool ToBooleanIsFalse() { return !value_->BooleanValue(); }
 
   // Identity testers.
   bool IsNull() const {
-    ASSERT(!handle_.is_null());
-    return handle_->IsNull();
+    ASSERT(!value_.is_null());
+    return value_->IsNull();
   }
   bool IsTrue() const {
-    ASSERT(!handle_.is_null());
-    return handle_->IsTrue();
+    ASSERT(!value_.is_null());
+    return value_->IsTrue();
   }
   bool IsFalse() const {
-    ASSERT(!handle_.is_null());
-    return handle_->IsFalse();
+    ASSERT(!value_.is_null());
+    return value_->IsFalse();
   }
 
-  Handle<Object> handle() const { return handle_; }
+  Handle<Object> value() const { return value_; }
 
   // Support for using Literal as a HashMap key. NOTE: Currently, this works
   // only for string and number literals!
@@ -1352,14 +1357,14 @@
   TypeFeedbackId LiteralFeedbackId() const { return reuse(id()); }
 
  protected:
-  Literal(Isolate* isolate, Handle<Object> handle)
+  Literal(Isolate* isolate, Handle<Object> value)
       : Expression(isolate),
-        handle_(handle) { }
+        value_(value) { }
 
  private:
   Handle<String> ToString();
 
-  Handle<Object> handle_;
+  Handle<Object> value_;
 };
 
 
@@ -1836,8 +1841,8 @@
   BailoutId MaterializeFalseId() { return materialize_false_id_; }
 
   TypeFeedbackId UnaryOperationFeedbackId() const { return reuse(id()); }
-  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
-  Handle<Type> type() const { return type_; }
+
+  virtual void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
 
  protected:
   UnaryOperation(Isolate* isolate,
@@ -1858,8 +1863,6 @@
   Expression* expression_;
   int pos_;
 
-  Handle<Type> type_;
-
   // For unary not (Token::NOT), the AST ids where true and false will
   // actually be materialized, respectively.
   const BailoutId materialize_true_id_;
@@ -1881,12 +1884,10 @@
   BailoutId RightId() const { return right_id_; }
 
   TypeFeedbackId BinaryOperationFeedbackId() const { return reuse(id()); }
-  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
-  Handle<Type> left_type() const { return left_type_; }
-  Handle<Type> right_type() const { return right_type_; }
-  Handle<Type> result_type() const { return result_type_; }
-  bool has_fixed_right_arg() const { return has_fixed_right_arg_; }
-  int fixed_right_arg_value() const { return fixed_right_arg_value_; }
+  Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
+  void set_fixed_right_arg(Maybe<int> arg) { fixed_right_arg_ = arg; }
+
+  virtual void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
 
  protected:
   BinaryOperation(Isolate* isolate,
@@ -1909,11 +1910,9 @@
   Expression* right_;
   int pos_;
 
-  Handle<Type> left_type_;
-  Handle<Type> right_type_;
-  Handle<Type> result_type_;
-  bool has_fixed_right_arg_;
-  int fixed_right_arg_value_;
+  // TODO(rossberg): the fixed arg should probably be represented as a Constant
+  // type for the RHS.
+  Maybe<int> fixed_right_arg_;
 
   // The short-circuit logical operations need an AST ID for their
   // right-hand subexpression.
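Maybe<int> bundles the old has_fixed_right_arg_/fixed_right_arg_value_ pair into one value. Its assumed shape, matching how the rest of this patch constructs it from a flag and a value:

    template <class T> struct Maybe {
      Maybe() : has_value(false) {}
      explicit Maybe(T t) : has_value(true), value(t) {}
      Maybe(bool has, T t) : has_value(has), value(t) {}
      bool has_value;
      T value;
    };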
@@ -1994,11 +1993,8 @@
 
   // Type feedback information.
   TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
-  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
-  Handle<Type> left_type() const { return left_type_; }
-  Handle<Type> right_type() const { return right_type_; }
-  Handle<Type> overall_type() const { return overall_type_; }
-  Handle<Type> compare_nil_type() const { return compare_nil_type_; }
+  Handle<Type> combined_type() const { return combined_type_; }
+  void set_combined_type(Handle<Type> type) { combined_type_ = type; }
 
   // Match special cases.
   bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
@@ -2025,10 +2021,7 @@
   Expression* right_;
   int pos_;
 
-  Handle<Type> left_type_;
-  Handle<Type> right_type_;
-  Handle<Type> overall_type_;
-  Handle<Type> compare_nil_type_;
+  Handle<Type> combined_type_;
 };
 
 
@@ -2096,6 +2089,7 @@
   TypeFeedbackId AssignmentFeedbackId() { return reuse(id()); }
   void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
   virtual bool IsMonomorphic() { return is_monomorphic_; }
+  bool IsUninitialized() { return is_uninitialized_; }
   virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
   virtual KeyedAccessStoreMode GetStoreMode() {
     return store_mode_;
@@ -2126,6 +2120,7 @@
   const BailoutId assignment_id_;
 
   bool is_monomorphic_ : 1;
+  bool is_uninitialized_ : 1;
   KeyedAccessStoreMode store_mode_ : 5;  // Windows treats as signed,
                                          // must have extra bit.
   SmallMapList receiver_types_;
diff --git a/src/atomicops_internals_tsan.h b/src/atomicops_internals_tsan.h
index 6559336..e52c26c 100644
--- a/src/atomicops_internals_tsan.h
+++ b/src/atomicops_internals_tsan.h
@@ -62,97 +62,162 @@
 typedef int   __tsan_atomic32;
 typedef long  __tsan_atomic64;  // NOLINT
 
+#if defined(__SIZEOF_INT128__) \
+    || (__clang_major__ * 100 + __clang_minor__ >= 302)
+typedef __int128 __tsan_atomic128;
+#define __TSAN_HAS_INT128 1
+#else
+typedef char     __tsan_atomic128;
+#define __TSAN_HAS_INT128 0
+#endif
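When neither test passes, __tsan_atomic128 degrades to char so the 128-bit prototypes below still parse; real uses are expected to stay behind the macro. A minimal guarded-use sketch (hypothetical variable names):

    #if __TSAN_HAS_INT128
      __tsan_atomic128 wide =
          __tsan_atomic128_load(addr, __tsan_memory_order_acquire);
    #endif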
+
 typedef enum {
-  __tsan_memory_order_relaxed = (1 << 0) + 100500,
-  __tsan_memory_order_consume = (1 << 1) + 100500,
-  __tsan_memory_order_acquire = (1 << 2) + 100500,
-  __tsan_memory_order_release = (1 << 3) + 100500,
-  __tsan_memory_order_acq_rel = (1 << 4) + 100500,
-  __tsan_memory_order_seq_cst = (1 << 5) + 100500,
+  __tsan_memory_order_relaxed,
+  __tsan_memory_order_consume,
+  __tsan_memory_order_acquire,
+  __tsan_memory_order_release,
+  __tsan_memory_order_acq_rel,
+  __tsan_memory_order_seq_cst,
 } __tsan_memory_order;
 
-__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
+__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
     __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
+__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
     __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
+__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
     __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
+__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
+    __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
     __tsan_memory_order mo);
 
-void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
+void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
     __tsan_memory_order mo);
-void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
+void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
     __tsan_memory_order mo);
-void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
+void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
     __tsan_memory_order mo);
-void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
+void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
+    __tsan_memory_order mo);
+void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
     __tsan_memory_order mo);
 
-__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
+__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
+__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
     __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
+__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
+__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
+    __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
+    __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
+    __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
+    __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
+    __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
 
-__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
-    __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
-    __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
-    __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
-    __tsan_atomic64 v, __tsan_memory_order mo);
+int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
+    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 
-__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
-    __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
-    __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
-    __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
-    __tsan_atomic64 v, __tsan_memory_order mo);
+int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
+    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 
-__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
-    __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
-    __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
-    __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
-    __tsan_atomic64 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
-    __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
-    __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
-    __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
-    __tsan_atomic64 v, __tsan_memory_order mo);
-
-int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
-    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
-int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
-    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
-int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
-    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
-int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
-    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);
-
-int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
-    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
-int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
-    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
-int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
-    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
-int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
-    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
+    volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
+    volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
+    volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
+    volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
+    volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
 
 void __tsan_atomic_thread_fence(__tsan_memory_order mo);
+void __tsan_atomic_signal_fence(__tsan_memory_order mo);
 
 #ifdef __cplusplus
 }  // extern "C"
@@ -160,166 +225,166 @@
 
 #endif  // #ifndef TSAN_INTERFACE_ATOMIC_H
 
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
   Atomic32 cmp = old_value;
   __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
-                                          __tsan_memory_order_relaxed);
+      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
   return cmp;
 }
 
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
                                          Atomic32 new_value) {
   return __tsan_atomic32_exchange(ptr, new_value,
-                                  __tsan_memory_order_relaxed);
+      __tsan_memory_order_relaxed);
 }
 
-inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
                                        Atomic32 new_value) {
   return __tsan_atomic32_exchange(ptr, new_value,
-                                  __tsan_memory_order_acquire);
+      __tsan_memory_order_acquire);
 }
 
-inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
+inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
                                        Atomic32 new_value) {
   return __tsan_atomic32_exchange(ptr, new_value,
-                                  __tsan_memory_order_release);
+      __tsan_memory_order_release);
 }
 
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
                                           Atomic32 increment) {
   return increment + __tsan_atomic32_fetch_add(ptr, increment,
-                                               __tsan_memory_order_relaxed);
+      __tsan_memory_order_relaxed);
 }
 
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
                                         Atomic32 increment) {
   return increment + __tsan_atomic32_fetch_add(ptr, increment,
-                                               __tsan_memory_order_acq_rel);
+      __tsan_memory_order_acq_rel);
 }
 
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   Atomic32 cmp = old_value;
   __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
-                                          __tsan_memory_order_acquire);
+      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
   return cmp;
 }
 
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   Atomic32 cmp = old_value;
   __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
-                                          __tsan_memory_order_release);
+      __tsan_memory_order_release, __tsan_memory_order_relaxed);
   return cmp;
 }
 
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
   __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
 }
 
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
   __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
   __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
 }
 
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
   __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
 }
 
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
   return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
 }
 
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
   return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
 }
 
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
   __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
   return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
 }
 
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                          Atomic64 old_value,
                                          Atomic64 new_value) {
   Atomic64 cmp = old_value;
   __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
-                                          __tsan_memory_order_relaxed);
+      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
   return cmp;
 }
 
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                          Atomic64 new_value) {
   return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
 }
 
-inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
                                        Atomic64 new_value) {
   return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
 }
 
-inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
+inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
                                        Atomic64 new_value) {
   return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
 }
 
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
                                           Atomic64 increment) {
   return increment + __tsan_atomic64_fetch_add(ptr, increment,
-                                               __tsan_memory_order_relaxed);
+      __tsan_memory_order_relaxed);
 }
 
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
                                         Atomic64 increment) {
   return increment + __tsan_atomic64_fetch_add(ptr, increment,
-                                               __tsan_memory_order_acq_rel);
+      __tsan_memory_order_acq_rel);
 }
 
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
   __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
 }
 
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
   __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
   __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
 }
 
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
   __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
 }
 
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
   return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
 }
 
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
   return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
 }
 
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
   __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
   return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
 }
 
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   Atomic64 cmp = old_value;
   __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
-                                          __tsan_memory_order_acquire);
+      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
   return cmp;
 }
 
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   Atomic64 cmp = old_value;
   __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
-                                          __tsan_memory_order_release);
+      __tsan_memory_order_release, __tsan_memory_order_relaxed);
   return cmp;
 }
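The annotated compare-exchange now takes separate success and failure orders, matching the C++11 API. The failure order may legitimately be weaker because no store happens on that path, which the release CAS above exploits. A commented restatement of that mapping:

    Atomic32 cmp = old_value;
    __tsan_atomic32_compare_exchange_strong(
        ptr, &cmp, new_value,
        __tsan_memory_order_release,    // applied only if the swap succeeds
        __tsan_memory_order_relaxed);   // failure path performs no store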
 
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index c78bf9c..d5b46de 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -859,12 +859,18 @@
                         isolate->initial_object_prototype(),
                         Builtins::kArrayCode, true, true);
     array_function->shared()->DontAdaptArguments();
+    array_function->shared()->set_function_data(Smi::FromInt(kArrayCode));
 
     // This seems a bit hackish, but we need to make sure Array.length
     // is 1.
     array_function->shared()->set_length(1);
 
     Handle<Map> initial_map(array_function->initial_map());
+
+    // This assert protects an optimization in
+    // HGraphBuilder::JSArrayBuilder::EmitMapCode()
+    ASSERT(initial_map->elements_kind() == GetInitialFastElementsKind());
+
     Handle<DescriptorArray> array_descriptors(
         factory->NewDescriptorArray(0, 1));
     DescriptorArray::WhitenessWitness witness(*array_descriptors);
@@ -885,16 +891,11 @@
     // overwritten by JS code.
     native_context()->set_array_function(*array_function);
 
-    if (FLAG_optimize_constructed_arrays) {
-      // Cache the array maps, needed by ArrayConstructorStub
-      CacheInitialJSArrayMaps(native_context(), initial_map);
-      ArrayConstructorStub array_constructor_stub(isolate);
-      Handle<Code> code = array_constructor_stub.GetCode(isolate);
-      array_function->shared()->set_construct_stub(*code);
-    } else {
-      array_function->shared()->set_construct_stub(
-          isolate->builtins()->builtin(Builtins::kCommonArrayConstructCode));
-    }
+    // Cache the array maps, needed by ArrayConstructorStub
+    CacheInitialJSArrayMaps(native_context(), initial_map);
+    ArrayConstructorStub array_constructor_stub(isolate);
+    Handle<Code> code = array_constructor_stub.GetCode(isolate);
+    array_function->shared()->set_construct_stub(*code);
   }
 
   {  // --- N u m b e r ---
@@ -1361,6 +1362,14 @@
     Handle<JSFunction> uint8c_fun = InstallTypedArray("Uint8ClampedArray",
         EXTERNAL_PIXEL_ELEMENTS);
     native_context()->set_uint8c_array_fun(*uint8c_fun);
+
+    Handle<JSFunction> data_view_fun =
+        InstallFunction(
+            global, "DataView", JS_DATA_VIEW_TYPE,
+            JSDataView::kSize,
+            isolate()->initial_object_prototype(),
+            Builtins::kIllegal, true, true);
+    native_context()->set_data_view_fun(*data_view_fun);
   }
 
   if (FLAG_harmony_generators) {
@@ -1614,15 +1623,9 @@
       factory()->NewJSObject(isolate()->object_function(), TENURED);
   SetPrototype(array_function, prototype);
 
-  if (FLAG_optimize_constructed_arrays) {
-    InternalArrayConstructorStub internal_array_constructor_stub(isolate());
-    Handle<Code> code = internal_array_constructor_stub.GetCode(isolate());
-    array_function->shared()->set_construct_stub(*code);
-  } else {
-    array_function->shared()->set_construct_stub(
-        isolate()->builtins()->builtin(Builtins::kCommonArrayConstructCode));
-  }
-
+  InternalArrayConstructorStub internal_array_constructor_stub(isolate());
+  Handle<Code> code = internal_array_constructor_stub.GetCode(isolate());
+  array_function->shared()->set_construct_stub(*code);
   array_function->shared()->DontAdaptArguments();
 
   Handle<Map> original_map(array_function->initial_map());
@@ -2577,7 +2580,14 @@
   StackLimitCheck check(isolate);
   if (check.HasOverflowed()) return;
 
-  native_context_ = Snapshot::NewContextFromSnapshot();
+  // We can only deserialize a context if the isolate was initialized from
+  // a snapshot. Otherwise we have to build the context from scratch.
+  if (isolate->initialized_from_snapshot()) {
+    native_context_ = Snapshot::NewContextFromSnapshot();
+  } else {
+    native_context_ = Handle<Context>();
+  }
+
   if (!native_context().is_null()) {
     AddToWeakNativeContextList(*native_context());
     isolate->set_context(*native_context());
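A context snapshot is only compatible with an isolate that was itself deserialized, hence the new guard; a null handle tells the rest of bootstrapping to build the native context from scratch. A hedged restatement as a helper (hypothetical function, not in the patch):

    Handle<Context> MaybeContextFromSnapshot(Isolate* isolate) {
      if (isolate->initialized_from_snapshot()) {
        return Snapshot::NewContextFromSnapshot();
      }
      return Handle<Context>();  // null: caller falls back to full bootstrap
    }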
diff --git a/src/builtins.cc b/src/builtins.cc
index fd7dd8a..5cc7915 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -209,23 +209,21 @@
     MaybeObject* maybe_array = array->Initialize(0);
     if (maybe_array->IsFailure()) return maybe_array;
 
-    if (FLAG_optimize_constructed_arrays) {
-      AllocationSiteInfo* info = AllocationSiteInfo::FindForJSObject(array);
-      ElementsKind to_kind = array->GetElementsKind();
-      if (info != NULL && info->GetElementsKindPayload(&to_kind)) {
-        if (IsMoreGeneralElementsKindTransition(array->GetElementsKind(),
-                                                to_kind)) {
-          // We have advice that we should change the elements kind
-          if (FLAG_trace_track_allocation_sites) {
-            PrintF("AllocationSiteInfo: pre-transitioning array %p(%s->%s)\n",
-                   reinterpret_cast<void*>(array),
-                   ElementsKindToString(array->GetElementsKind()),
-                   ElementsKindToString(to_kind));
-          }
-
-          maybe_array = array->TransitionElementsKind(to_kind);
-          if (maybe_array->IsFailure()) return maybe_array;
+    AllocationSiteInfo* info = AllocationSiteInfo::FindForJSObject(array);
+    ElementsKind to_kind = array->GetElementsKind();
+    if (info != NULL && info->GetElementsKindPayload(&to_kind)) {
+      if (IsMoreGeneralElementsKindTransition(array->GetElementsKind(),
+                                              to_kind)) {
+        // We have advice that we should change the elements kind
+        if (FLAG_trace_track_allocation_sites) {
+          PrintF("AllocationSiteInfo: pre-transitioning array %p(%s->%s)\n",
+                 reinterpret_cast<void*>(array),
+                 ElementsKindToString(array->GetElementsKind()),
+                 ElementsKindToString(to_kind));
         }
+
+        maybe_array = array->TransitionElementsKind(to_kind);
+        if (maybe_array->IsFailure()) return maybe_array;
       }
     }
 
@@ -1267,14 +1265,7 @@
                                      args.length() - 1,
                                      is_construct);
 
-    v8::Handle<v8::Value> value;
-    {
-      // Leaving JavaScript.
-      VMState<EXTERNAL> state(isolate);
-      ExternalCallbackScope call_scope(isolate,
-                                       v8::ToCData<Address>(callback_obj));
-      value = custom.Call(callback);
-    }
+    v8::Handle<v8::Value> value = custom.Call(callback);
     if (value.IsEmpty()) {
       result = heap->undefined_value();
     } else {
@@ -1343,14 +1334,7 @@
                                      &args[0] - 1,
                                      args.length() - 1,
                                      is_construct_call);
-    v8::Handle<v8::Value> value;
-    {
-      // Leaving JavaScript.
-      VMState<EXTERNAL> state(isolate);
-      ExternalCallbackScope call_scope(isolate,
-                                       v8::ToCData<Address>(callback_obj));
-      value = custom.Call(callback);
-    }
+    v8::Handle<v8::Value> value = custom.Call(callback);
     if (value.IsEmpty()) {
       result = heap->undefined_value();
     } else {
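Both hunks drop the same boilerplate around custom.Call(callback); the VMState/ExternalCallbackScope pair is assumed to have moved inside the Call implementation so each builtin no longer repeats it. Roughly the shape assumed to live there now (hypothetical wrapper; this patch does not show it):

    template <typename DoCall>
    v8::Handle<v8::Value> CallInExternalState(i::Isolate* isolate,
                                              Address callback_address,
                                              DoCall do_call) {
      VMState<EXTERNAL> state(isolate);  // leaving JavaScript
      ExternalCallbackScope call_scope(isolate, callback_address);
      return do_call();
    }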
diff --git a/src/builtins.h b/src/builtins.h
index df833df..edf650d 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -209,8 +209,6 @@
                                     Code::kNoExtraICState)              \
   V(ArrayCode,                      BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
-  V(CommonArrayConstructCode,       BUILTIN, UNINITIALIZED,             \
-                                    Code::kNoExtraICState)              \
                                                                         \
   V(StringConstructCode,            BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
@@ -399,7 +397,6 @@
 
   static void Generate_InternalArrayCode(MacroAssembler* masm);
   static void Generate_ArrayCode(MacroAssembler* masm);
-  static void Generate_CommonArrayConstructCode(MacroAssembler* masm);
 
   static void Generate_StringConstructCode(MacroAssembler* masm);
   static void Generate_OnStackReplacement(MacroAssembler* masm);
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 11cd307..06c9039 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -106,7 +106,8 @@
   };
 
   HValue* BuildArrayConstructor(ElementsKind kind,
-                                bool disable_allocation_sites,
+                                ContextCheckMode context_mode,
+                                AllocationSiteOverrideMode override_mode,
                                 ArgumentClass argument_class);
   HValue* BuildInternalArrayConstructor(ElementsKind kind,
                                         ArgumentClass argument_class);
@@ -534,15 +535,19 @@
 }
 
 HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
-    ElementsKind kind, bool disable_allocation_sites,
+    ElementsKind kind,
+    ContextCheckMode context_mode,
+    AllocationSiteOverrideMode override_mode,
     ArgumentClass argument_class) {
   HValue* constructor = GetParameter(ArrayConstructorStubBase::kConstructor);
-  HValue* property_cell = GetParameter(ArrayConstructorStubBase::kPropertyCell);
-  HInstruction* array_function = BuildGetArrayFunction(context());
+  if (context_mode == CONTEXT_CHECK_REQUIRED) {
+    HInstruction* array_function = BuildGetArrayFunction(context());
+    ArrayContextChecker checker(this, constructor, array_function);
+  }
 
-  ArrayContextChecker(this, constructor, array_function);
-  JSArrayBuilder array_builder(this, kind, property_cell,
-                               disable_allocation_sites);
+  HValue* property_cell = GetParameter(ArrayConstructorStubBase::kPropertyCell);
+  JSArrayBuilder array_builder(this, kind, property_cell, constructor,
+                               override_mode);
   HValue* result = NULL;
   switch (argument_class) {
     case NONE:
@@ -555,6 +560,7 @@
       result = BuildArrayNArgumentsConstructor(&array_builder, kind);
       break;
   }
+
   return result;
 }
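The context check now only runs when the stub requires it, and the checker gained a name for a classic C++ reason: ArrayContextChecker(this, constructor, array_function); constructs an unnamed temporary that is destroyed at the end of the statement, so a scope-based checker would close immediately. Minimal illustration of the difference (assuming the checker scopes its work to its own lifetime):

    {
      ArrayContextChecker(this, constructor, array_function);
      // the temporary above is already destroyed at this point
      ArrayContextChecker checker(this, constructor, array_function);
      // the named object lives until the end of the enclosing block
    }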
 
@@ -599,7 +605,7 @@
   HConstant* initial_capacity_node = new(zone()) HConstant(initial_capacity);
   AddInstruction(initial_capacity_node);
 
-  HBoundsCheck* checked_arg = AddBoundsCheck(argument, max_alloc_length);
+  HBoundsCheck* checked_arg = Add<HBoundsCheck>(argument, max_alloc_length);
   IfBuilder if_builder(this);
   if_builder.IfCompare(checked_arg, constant_zero, Token::EQ);
   if_builder.Then();
@@ -652,8 +658,9 @@
 template <>
 HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
   ElementsKind kind = casted_stub()->elements_kind();
-  bool disable_allocation_sites = casted_stub()->disable_allocation_sites();
-  return BuildArrayConstructor(kind, disable_allocation_sites, NONE);
+  ContextCheckMode context_mode = casted_stub()->context_mode();
+  AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
+  return BuildArrayConstructor(kind, context_mode, override_mode, NONE);
 }
 
 
@@ -666,8 +673,9 @@
 HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
     BuildCodeStub() {
   ElementsKind kind = casted_stub()->elements_kind();
-  bool disable_allocation_sites = casted_stub()->disable_allocation_sites();
-  return BuildArrayConstructor(kind, disable_allocation_sites, SINGLE);
+  ContextCheckMode context_mode = casted_stub()->context_mode();
+  AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
+  return BuildArrayConstructor(kind, context_mode, override_mode, SINGLE);
 }
 
 
@@ -679,8 +687,9 @@
 template <>
 HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
   ElementsKind kind = casted_stub()->elements_kind();
-  bool disable_allocation_sites = casted_stub()->disable_allocation_sites();
-  return BuildArrayConstructor(kind, disable_allocation_sites, MULTIPLE);
+  ContextCheckMode context_mode = casted_stub()->context_mode();
+  AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
+  return BuildArrayConstructor(kind, context_mode, override_mode, MULTIPLE);
 }
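Each stub now carries two orthogonal knobs instead of one bool: whether the generated code must verify that the constructor really is the Array function (ContextCheckMode) and whether per-call-site allocation-site feedback is honored or forcibly disabled (AllocationSiteOverrideMode, both defined later in this patch). A hedged construction sketch (assumes the derived stubs forward these arguments to ArrayConstructorStubBase):

    ArrayNoArgumentConstructorStub stub(
        GetInitialFastElementsKind(),
        CONTEXT_CHECK_REQUIRED,   // emit the constructor identity check
        DONT_OVERRIDE);           // keep using allocation-site feedback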
 
 
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 5bec7e0..2d94ec9 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -757,24 +757,11 @@
 }
 
 
-FunctionEntryHook ProfileEntryHookStub::entry_hook_ = NULL;
-
-
 void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
                                                intptr_t stack_pointer) {
-  if (entry_hook_ != NULL)
-    entry_hook_(function, stack_pointer);
-}
-
-
-bool ProfileEntryHookStub::SetFunctionEntryHook(FunctionEntryHook entry_hook) {
-  // We don't allow setting a new entry hook over one that's
-  // already active, as the hooks won't stack.
-  if (entry_hook != 0 && entry_hook_ != 0)
-    return false;
-
-  entry_hook_ = entry_hook;
-  return true;
+  FunctionEntryHook entry_hook = Isolate::Current()->function_entry_hook();
+  ASSERT(entry_hook != NULL);
+  entry_hook(function, stack_pointer);
 }
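The hook is no longer a process-wide static but is looked up on the current isolate, so the trampoline asserts one is present instead of tolerating NULL. A sketch of a hook matching the trampoline's calling convention (hypothetical body):

    static void MyEntryHook(intptr_t function, intptr_t stack_pointer) {
      // Invoked on entry to generated code; e.g. record |function| as the
      // entry address of the code object being entered.
    }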
 
 
diff --git a/src/code-stubs.h b/src/code-stubs.h
index edbc93d..c7076b6 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -904,8 +904,7 @@
         left_type_(BinaryOpIC::UNINITIALIZED),
         right_type_(BinaryOpIC::UNINITIALIZED),
         result_type_(BinaryOpIC::UNINITIALIZED),
-        has_fixed_right_arg_(false),
-        encoded_right_arg_(encode_arg_value(1)) {
+        encoded_right_arg_(false, encode_arg_value(1)) {
     Initialize();
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
@@ -915,16 +914,15 @@
       BinaryOpIC::TypeInfo left_type,
       BinaryOpIC::TypeInfo right_type,
       BinaryOpIC::TypeInfo result_type,
-      bool has_fixed_right_arg,
-      int32_t fixed_right_arg_value)
+      Maybe<int32_t> fixed_right_arg)
       : op_(OpBits::decode(key)),
         mode_(ModeBits::decode(key)),
         platform_specific_bit_(PlatformSpecificBits::decode(key)),
         left_type_(left_type),
         right_type_(right_type),
         result_type_(result_type),
-        has_fixed_right_arg_(has_fixed_right_arg),
-        encoded_right_arg_(encode_arg_value(fixed_right_arg_value)) { }
+        encoded_right_arg_(fixed_right_arg.has_value,
+                           encode_arg_value(fixed_right_arg.value)) { }
 
   static void decode_types_from_minor_key(int minor_key,
                                           BinaryOpIC::TypeInfo* left_type,
@@ -942,16 +940,14 @@
     return static_cast<Token::Value>(OpBits::decode(minor_key));
   }
 
-  static bool decode_has_fixed_right_arg_from_minor_key(int minor_key) {
-    return HasFixedRightArgBits::decode(minor_key);
-  }
-
-  static int decode_fixed_right_arg_value_from_minor_key(int minor_key) {
-    return decode_arg_value(FixedRightArgValueBits::decode(minor_key));
+  static Maybe<int> decode_fixed_right_arg_from_minor_key(int minor_key) {
+    return Maybe<int>(
+        HasFixedRightArgBits::decode(minor_key),
+        decode_arg_value(FixedRightArgValueBits::decode(minor_key)));
   }
 
   int fixed_right_arg_value() const {
-    return decode_arg_value(encoded_right_arg_);
+    return decode_arg_value(encoded_right_arg_.value);
   }
 
   static bool can_encode_arg_value(int32_t value) {
@@ -975,8 +971,7 @@
   BinaryOpIC::TypeInfo right_type_;
   BinaryOpIC::TypeInfo result_type_;
 
-  bool has_fixed_right_arg_;
-  int encoded_right_arg_;
+  Maybe<int> encoded_right_arg_;
 
   static int encode_arg_value(int32_t value) {
     ASSERT(can_encode_arg_value(value));
@@ -1009,8 +1004,8 @@
            | LeftTypeBits::encode(left_type_)
            | RightTypeBits::encode(right_type_)
            | ResultTypeBits::encode(result_type_)
-           | HasFixedRightArgBits::encode(has_fixed_right_arg_)
-           | FixedRightArgValueBits::encode(encoded_right_arg_);
+           | HasFixedRightArgBits::encode(encoded_right_arg_.has_value)
+           | FixedRightArgValueBits::encode(encoded_right_arg_.value);
   }
 
 
@@ -1207,6 +1202,9 @@
   static byte ExtractTypesFromExtraICState(Code::ExtraICState state) {
     return state & ((1 << NUMBER_OF_TYPES) - 1);
   }
+  static NilValue ExtractNilValueFromExtraICState(Code::ExtraICState state) {
+    return NilValueField::decode(state);
+  }
 
   void Record(Handle<Object> object);
 
@@ -1735,27 +1733,51 @@
 };
 
 
+enum ContextCheckMode {
+  CONTEXT_CHECK_REQUIRED,
+  CONTEXT_CHECK_NOT_REQUIRED,
+  LAST_CONTEXT_CHECK_MODE = CONTEXT_CHECK_NOT_REQUIRED
+};
+
+
+enum AllocationSiteOverrideMode {
+  DONT_OVERRIDE,
+  DISABLE_ALLOCATION_SITES,
+  LAST_ALLOCATION_SITE_OVERRIDE_MODE = DISABLE_ALLOCATION_SITES
+};
+
+
 class ArrayConstructorStubBase : public HydrogenCodeStub {
  public:
-  ArrayConstructorStubBase(ElementsKind kind, bool disable_allocation_sites) {
+  ArrayConstructorStubBase(ElementsKind kind, ContextCheckMode context_mode,
+                           AllocationSiteOverrideMode override_mode) {
     // It only makes sense to override local allocation site behavior
     // if there is a difference between the global allocation site policy
     // for an ElementsKind and the desired usage of the stub.
-    ASSERT(!disable_allocation_sites ||
+    ASSERT(override_mode != DISABLE_ALLOCATION_SITES ||
            AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE);
     bit_field_ = ElementsKindBits::encode(kind) |
-        DisableAllocationSitesBits::encode(disable_allocation_sites);
+        AllocationSiteOverrideModeBits::encode(override_mode) |
+        ContextCheckModeBits::encode(context_mode);
   }
 
   ElementsKind elements_kind() const {
     return ElementsKindBits::decode(bit_field_);
   }
 
-  bool disable_allocation_sites() const {
-    return DisableAllocationSitesBits::decode(bit_field_);
+  AllocationSiteOverrideMode override_mode() const {
+    return AllocationSiteOverrideModeBits::decode(bit_field_);
   }
 
-  virtual bool IsPregenerated() { return true; }
+  ContextCheckMode context_mode() const {
+    return ContextCheckModeBits::decode(bit_field_);
+  }
+
+  virtual bool IsPregenerated() {
+    // We only pre-generate stubs that perform the context check.
+    return context_mode() == CONTEXT_CHECK_REQUIRED;
+  }
+
   static void GenerateStubsAheadOfTime(Isolate* isolate);
   static void InstallDescriptors(Isolate* isolate);
 
@@ -1766,8 +1788,14 @@
  private:
   int NotMissMinorKey() { return bit_field_; }
 
+  // Ensure data fits within available bits.
+  STATIC_ASSERT(LAST_ALLOCATION_SITE_OVERRIDE_MODE == 1);
+  STATIC_ASSERT(LAST_CONTEXT_CHECK_MODE == 1);
+
   class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
-  class DisableAllocationSitesBits: public BitField<bool, 8, 1> {};
+  class AllocationSiteOverrideModeBits: public
+      BitField<AllocationSiteOverrideMode, 8, 1> {};  // NOLINT
+  class ContextCheckModeBits: public BitField<ContextCheckMode, 9, 1> {};
   uint32_t bit_field_;
 
   DISALLOW_COPY_AND_ASSIGN(ArrayConstructorStubBase);
@@ -1778,8 +1806,9 @@
  public:
   ArrayNoArgumentConstructorStub(
       ElementsKind kind,
-      bool disable_allocation_sites = false)
-      : ArrayConstructorStubBase(kind, disable_allocation_sites) {
+      ContextCheckMode context_mode = CONTEXT_CHECK_REQUIRED,
+      AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
+      : ArrayConstructorStubBase(kind, context_mode, override_mode) {
   }
 
   virtual Handle<Code> GenerateCode();
@@ -1799,8 +1828,9 @@
  public:
   ArraySingleArgumentConstructorStub(
       ElementsKind kind,
-      bool disable_allocation_sites = false)
-      : ArrayConstructorStubBase(kind, disable_allocation_sites) {
+      ContextCheckMode context_mode = CONTEXT_CHECK_REQUIRED,
+      AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
+      : ArrayConstructorStubBase(kind, context_mode, override_mode) {
   }
 
   virtual Handle<Code> GenerateCode();
@@ -1820,8 +1850,9 @@
  public:
   ArrayNArgumentsConstructorStub(
       ElementsKind kind,
-      bool disable_allocation_sites = false)
-      : ArrayConstructorStubBase(kind, disable_allocation_sites) {
+      ContextCheckMode context_mode = CONTEXT_CHECK_REQUIRED,
+      AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
+      : ArrayConstructorStubBase(kind, context_mode, override_mode) {
   }
 
   virtual Handle<Code> GenerateCode();
@@ -1973,7 +2004,7 @@
 
   class Types : public EnumSet<Type, byte> {
    public:
-    Types() {}
+    Types() : EnumSet<Type, byte>(0) {}
     explicit Types(byte bits) : EnumSet<Type, byte>(bits) {}
 
     byte ToByte() const { return ToIntegral(); }
@@ -1982,10 +2013,10 @@
     bool Record(Handle<Object> object);
     bool NeedsMap() const;
     bool CanBeUndetectable() const;
-  };
+    bool IsGeneric() const { return ToIntegral() == Generic().ToIntegral(); }
 
-  static Types no_types() { return Types(); }
-  static Types all_types() { return Types((1 << NUMBER_OF_TYPES) - 1); }
+    static Types Generic() { return Types((1 << NUMBER_OF_TYPES) - 1); }
+  };
 
   explicit ToBooleanStub(Types types = Types())
       : types_(types) { }
@@ -2137,13 +2168,6 @@
   // Generates a call to the entry hook if it's enabled.
   static void MaybeCallEntryHook(MacroAssembler* masm);
 
-  // Sets or unsets the entry hook function. Returns true on success,
-  // false on an attempt to replace a non-NULL entry hook with another
-  // non-NULL hook.
-  static bool SetFunctionEntryHook(FunctionEntryHook entry_hook);
-
-  static bool HasEntryHook() { return entry_hook_ != NULL; }
-
  private:
   static void EntryHookTrampoline(intptr_t function,
                                   intptr_t stack_pointer);
@@ -2153,9 +2177,6 @@
 
   void Generate(MacroAssembler* masm);
 
-  // The current function entry hook.
-  static FunctionEntryHook entry_hook_;
-
   DISALLOW_COPY_AND_ASSIGN(ProfileEntryHookStub);
 };
 
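BinaryOpStub above folds the old has_fixed_right_arg_/encoded_right_arg_ pair into a single Maybe<int>, addressed via .has_value and .value. A sketch of the two-field shape those uses imply (the actual definition lives elsewhere in V8 and may differ in detail):

    // Sketch of the two-field shape implied by the .has_value/.value uses
    // above; V8's real Maybe<T> lives elsewhere and may differ in detail.
    template <class T>
    struct Maybe {
      Maybe() : has_value(false), value(T()) {}
      Maybe(bool has, T t) : has_value(has), value(t) {}
      explicit Maybe(T t) : has_value(true), value(t) {}
      bool has_value;
      T value;
    };

Carrying the flag and its payload in one object keeps them from drifting apart, which the previous pair of independent members permitted.
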
diff --git a/src/codegen.cc b/src/codegen.cc
index af2f1f6..7328e40 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -178,7 +178,7 @@
       !isolate->cpu_profiler()->is_profiling()) {
     return false;
   }
-  Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
+  Handle<String> name = Handle<String>::cast(type->AsLiteral()->value());
   if (FLAG_log_regexp) {
     if (name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("regexp")))
       return true;
diff --git a/src/compiler.cc b/src/compiler.cc
index 7b0b921..f643b9d 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -53,7 +53,8 @@
 namespace internal {
 
 
-CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
+CompilationInfo::CompilationInfo(Handle<Script> script,
+                                 Zone* zone)
     : flags_(LanguageModeField::encode(CLASSIC_MODE)),
       script_(script),
       osr_ast_id_(BailoutId::None()) {
@@ -71,7 +72,8 @@
 }
 
 
-CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
+CompilationInfo::CompilationInfo(Handle<JSFunction> closure,
+                                 Zone* zone)
     : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
       closure_(closure),
       shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
@@ -83,7 +85,8 @@
 
 
 CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
-                                 Isolate* isolate, Zone* zone)
+                                 Isolate* isolate,
+                                 Zone* zone)
     : flags_(LanguageModeField::encode(CLASSIC_MODE) |
              IsLazy::encode(true)),
       osr_ast_id_(BailoutId::None()) {
@@ -92,7 +95,9 @@
 }
 
 
-void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) {
+void CompilationInfo::Initialize(Isolate* isolate,
+                                 Mode mode,
+                                 Zone* zone) {
   isolate_ = isolate;
   function_ = NULL;
   scope_ = NULL;
@@ -107,7 +112,7 @@
   no_frame_ranges_ = isolate->cpu_profiler()->is_profiling()
                    ? new List<OffsetRange>(2) : NULL;
   for (int i = 0; i < DependentCode::kGroupCount; i++) {
-    dependent_maps_[i] = NULL;
+    dependencies_[i] = NULL;
   }
   if (mode == STUB) {
     mode_ = STUB;
@@ -132,36 +137,42 @@
   // Check that no dependent maps have been added or added dependent maps have
   // been rolled back or committed.
   for (int i = 0; i < DependentCode::kGroupCount; i++) {
-    ASSERT_EQ(NULL, dependent_maps_[i]);
+    ASSERT_EQ(NULL, dependencies_[i]);
   }
 #endif  // DEBUG
 }
 
 
-void CompilationInfo::CommitDependentMaps(Handle<Code> code) {
+void CompilationInfo::CommitDependencies(Handle<Code> code) {
   for (int i = 0; i < DependentCode::kGroupCount; i++) {
-    ZoneList<Handle<Map> >* group_maps = dependent_maps_[i];
-    if (group_maps == NULL) continue;
+    ZoneList<Handle<HeapObject> >* group_objects = dependencies_[i];
+    if (group_objects == NULL) continue;
     ASSERT(!object_wrapper_.is_null());
-    for (int j = 0; j < group_maps->length(); j++) {
-      group_maps->at(j)->dependent_code()->UpdateToFinishedCode(
-          static_cast<DependentCode::DependencyGroup>(i), this, *code);
+    for (int j = 0; j < group_objects->length(); j++) {
+      DependentCode::DependencyGroup group =
+          static_cast<DependentCode::DependencyGroup>(i);
+      DependentCode* dependent_code =
+          DependentCode::ForObject(group_objects->at(j), group);
+      dependent_code->UpdateToFinishedCode(group, this, *code);
     }
-    dependent_maps_[i] = NULL;  // Zone-allocated, no need to delete.
+    dependencies_[i] = NULL;  // Zone-allocated, no need to delete.
   }
 }
 
 
-void CompilationInfo::RollbackDependentMaps() {
+void CompilationInfo::RollbackDependencies() {
   // Unregister from all dependent maps if not yet committed.
   for (int i = 0; i < DependentCode::kGroupCount; i++) {
-    ZoneList<Handle<Map> >* group_maps = dependent_maps_[i];
-    if (group_maps == NULL) continue;
-    for (int j = 0; j < group_maps->length(); j++) {
-      group_maps->at(j)->dependent_code()->RemoveCompilationInfo(
-          static_cast<DependentCode::DependencyGroup>(i), this);
+    ZoneList<Handle<HeapObject> >* group_objects = dependencies_[i];
+    if (group_objects == NULL) continue;
+    for (int j = 0; j < group_objects->length(); j++) {
+      DependentCode::DependencyGroup group =
+          static_cast<DependentCode::DependencyGroup>(i);
+      DependentCode* dependent_code =
+          DependentCode::ForObject(group_objects->at(j), group);
+      dependent_code->RemoveCompilationInfo(group, this);
     }
-    dependent_maps_[i] = NULL;  // Zone-allocated, no need to delete.
+    dependencies_[i] = NULL;  // Zone-allocated, no need to delete.
   }
 }
 
@@ -367,7 +378,10 @@
   // performance of the hydrogen-based compiler.
   bool should_recompile = !info()->shared_info()->has_deoptimization_support();
   if (should_recompile || FLAG_hydrogen_stats) {
-    HPhase phase(HPhase::kFullCodeGen, isolate());
+    int64_t start_ticks = 0;
+    if (FLAG_hydrogen_stats) {
+      start_ticks = OS::Ticks();
+    }
     CompilationInfoWithZone unoptimized(info()->shared_info());
     // Note that we use the same AST that we will use for generating the
     // optimized code.
@@ -384,6 +398,10 @@
       Compiler::RecordFunctionCompilation(
           Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
     }
+    if (FLAG_hydrogen_stats) {
+      int64_t ticks = OS::Ticks() - start_ticks;
+      isolate()->GetHStatistics()->IncrementFullCodeGen(ticks);
+    }
   }
 
   // Check that the unoptimized, shared code is ready for
@@ -528,7 +546,6 @@
 
 static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
   Isolate* isolate = info->isolate();
-  ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
   PostponeInterruptsScope postpone(isolate);
 
   ASSERT(!isolate->native_context().is_null());
@@ -811,7 +828,6 @@
 
   // Check the function has compiled code.
   ASSERT(shared->is_compiled());
-  shared->set_code_age(0);
   shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
   shared->set_dont_inline(lit->flags()->Contains(kDontInline));
   shared->set_ast_node_count(lit->ast_node_count());
@@ -893,8 +909,6 @@
 bool Compiler::CompileLazy(CompilationInfo* info) {
   Isolate* isolate = info->isolate();
 
-  ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
-
   // The VM is in the COMPILER state until exiting this function.
   VMState<COMPILER> state(isolate);
 
@@ -1038,12 +1052,15 @@
   // If crankshaft succeeded, install the optimized code else install
   // the unoptimized code.
   OptimizingCompiler::Status status = optimizing_compiler->last_status();
-  if (info->HasAbortedDueToDependentMap()) {
+  if (info->HasAbortedDueToDependencyChange()) {
     info->set_bailout_reason("bailed out due to dependent map");
     status = optimizing_compiler->AbortOptimization();
   } else if (status != OptimizingCompiler::SUCCEEDED) {
     info->set_bailout_reason("failed/bailed out last time");
     status = optimizing_compiler->AbortOptimization();
+  } else if (isolate->debugger()->IsDebuggerActive()) {
+    info->set_bailout_reason("debugger is active");
+    status = optimizing_compiler->AbortOptimization();
   } else {
     status = optimizing_compiler->GenerateAndInstallCode();
     ASSERT(status == OptimizingCompiler::SUCCEEDED ||
@@ -1207,4 +1224,31 @@
                  info));
 }
 
+
+CompilationPhase::CompilationPhase(const char* name, CompilationInfo* info)
+    : name_(name), info_(info), zone_(info->isolate()) {
+  if (FLAG_hydrogen_stats) {
+    info_zone_start_allocation_size_ = info->zone()->allocation_size();
+    start_ticks_ = OS::Ticks();
+  }
+}
+
+
+CompilationPhase::~CompilationPhase() {
+  if (FLAG_hydrogen_stats) {
+    unsigned size = zone()->allocation_size();
+    size += info_->zone()->allocation_size() - info_zone_start_allocation_size_;
+    int64_t ticks = OS::Ticks() - start_ticks_;
+    isolate()->GetHStatistics()->SaveTiming(name_, ticks, size);
+  }
+}
+
+
+bool CompilationPhase::ShouldProduceTraceOutput() const {
+  // Produce trace output if FLAG_trace_hydrogen is set and the first letter
+  // of the phase name occurs in the command line parameter FLAG_trace_phase.
+  return (FLAG_trace_hydrogen &&
+          OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL);
+}
+
 } }  // namespace v8::internal
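CompilationPhase samples OS::Ticks() and the zone allocation size in its constructor and reports the deltas in its destructor, so a phase is timed simply by living on the stack. A generic sketch of the same RAII idea, using std::chrono in place of V8's tick counter (ScopedPhaseTimer is a hypothetical name):

    // Sketch of the RAII timing idea behind CompilationPhase, using
    // std::chrono in place of OS::Ticks().
    #include <chrono>
    #include <cstdio>

    class ScopedPhaseTimer {
     public:
      explicit ScopedPhaseTimer(const char* name)
          : name_(name), start_(std::chrono::steady_clock::now()) {}
      ~ScopedPhaseTimer() {
        // Report the elapsed time when the phase scope closes.
        long long us = std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::steady_clock::now() - start_).count();
        std::printf("%s: %lld us\n", name_, us);
      }
     private:
      const char* name_;
      std::chrono::steady_clock::time_point start_;
    };

    // Usage: time a phase by putting the timer on the stack.
    // { ScopedPhaseTimer t("graph building"); BuildGraph(); }
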
diff --git a/src/compiler.h b/src/compiler.h
index f53feb9..161f404 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -239,16 +239,17 @@
     deferred_handles_ = deferred_handles;
   }
 
-  ZoneList<Handle<Map> >* dependent_maps(DependentCode::DependencyGroup group) {
-    if (dependent_maps_[group] == NULL) {
-      dependent_maps_[group] = new(zone_) ZoneList<Handle<Map> >(2, zone_);
+  ZoneList<Handle<HeapObject> >* dependencies(
+      DependentCode::DependencyGroup group) {
+    if (dependencies_[group] == NULL) {
+      dependencies_[group] = new(zone_) ZoneList<Handle<HeapObject> >(2, zone_);
     }
-    return dependent_maps_[group];
+    return dependencies_[group];
   }
 
-  void CommitDependentMaps(Handle<Code> code);
+  void CommitDependencies(Handle<Code> code);
 
-  void RollbackDependentMaps();
+  void RollbackDependencies();
 
   void SaveHandles() {
     SaveHandle(&closure_);
@@ -291,18 +292,22 @@
     return object_wrapper_;
   }
 
-  void AbortDueToDependentMap() {
-    mode_ = DEPENDENT_MAP_ABORT;
+  void AbortDueToDependencyChange() {
+    mode_ = DEPENDENCY_CHANGE_ABORT;
   }
 
-  bool HasAbortedDueToDependentMap() {
-    return mode_ == DEPENDENT_MAP_ABORT;
+  bool HasAbortedDueToDependencyChange() {
+    return mode_ == DEPENDENCY_CHANGE_ABORT;
   }
 
  protected:
-  CompilationInfo(Handle<Script> script, Zone* zone);
-  CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
-  CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate, Zone* zone);
+  CompilationInfo(Handle<Script> script,
+                  Zone* zone);
+  CompilationInfo(Handle<SharedFunctionInfo> shared_info,
+                  Zone* zone);
+  CompilationInfo(HydrogenCodeStub* stub,
+                  Isolate* isolate,
+                  Zone* zone);
 
  private:
   Isolate* isolate_;
@@ -317,7 +322,7 @@
     OPTIMIZE,
     NONOPT,
     STUB,
-    DEPENDENT_MAP_ABORT
+    DEPENDENCY_CHANGE_ABORT
   };
 
   void Initialize(Isolate* isolate, Mode mode, Zone* zone);
@@ -397,7 +402,7 @@
 
   DeferredHandles* deferred_handles_;
 
-  ZoneList<Handle<Map> >* dependent_maps_[DependentCode::kGroupCount];
+  ZoneList<Handle<HeapObject> >* dependencies_[DependentCode::kGroupCount];
 
   template<typename T>
   void SaveHandle(Handle<T> *object) {
@@ -429,31 +434,26 @@
  public:
   explicit CompilationInfoWithZone(Handle<Script> script)
       : CompilationInfo(script, &zone_),
-        zone_(script->GetIsolate()),
-        zone_scope_(&zone_, DELETE_ON_EXIT) {}
+        zone_(script->GetIsolate()) {}
   explicit CompilationInfoWithZone(Handle<SharedFunctionInfo> shared_info)
       : CompilationInfo(shared_info, &zone_),
-        zone_(shared_info->GetIsolate()),
-        zone_scope_(&zone_, DELETE_ON_EXIT) {}
+        zone_(shared_info->GetIsolate()) {}
   explicit CompilationInfoWithZone(Handle<JSFunction> closure)
       : CompilationInfo(closure, &zone_),
-        zone_(closure->GetIsolate()),
-        zone_scope_(&zone_, DELETE_ON_EXIT) {}
+        zone_(closure->GetIsolate()) {}
   CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
       : CompilationInfo(stub, isolate, &zone_),
-        zone_(isolate),
-        zone_scope_(&zone_, DELETE_ON_EXIT) {}
+        zone_(isolate) {}
 
   // Virtual destructor because a CompilationInfoWithZone has to exit the
   // zone scope and get rid of dependent maps even when the destructor is
   // called when cast as a CompilationInfo.
   virtual ~CompilationInfoWithZone() {
-    RollbackDependentMaps();
+    RollbackDependencies();
   }
 
  private:
   Zone zone_;
-  ZoneScope zone_scope_;
 };
 
 
@@ -617,6 +617,30 @@
 };
 
 
+class CompilationPhase BASE_EMBEDDED {
+ public:
+  CompilationPhase(const char* name, CompilationInfo* info);
+  ~CompilationPhase();
+
+ protected:
+  bool ShouldProduceTraceOutput() const;
+
+  const char* name() const { return name_; }
+  CompilationInfo* info() const { return info_; }
+  Isolate* isolate() const { return info()->isolate(); }
+  Zone* zone() { return &zone_; }
+
+ private:
+  const char* name_;
+  CompilationInfo* info_;
+  Zone zone_;
+  unsigned info_zone_start_allocation_size_;
+  int64_t start_ticks_;
+
+  DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_COMPILER_H_
diff --git a/src/contexts.h b/src/contexts.h
index f04ccd1..fdf6d27 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -134,6 +134,7 @@
   V(FLOAT_ARRAY_FUN_INDEX, JSFunction, float_array_fun) \
   V(DOUBLE_ARRAY_FUN_INDEX, JSFunction, double_array_fun) \
   V(UINT8C_ARRAY_FUN_INDEX, JSFunction, uint8c_array_fun) \
+  V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
   V(FUNCTION_MAP_INDEX, Map, function_map) \
   V(STRICT_MODE_FUNCTION_MAP_INDEX, Map, strict_mode_function_map) \
   V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
@@ -299,6 +300,7 @@
     FLOAT_ARRAY_FUN_INDEX,
     DOUBLE_ARRAY_FUN_INDEX,
     UINT8C_ARRAY_FUN_INDEX,
+    DATA_VIEW_FUN_INDEX,
     MESSAGE_LISTENERS_INDEX,
     MAKE_MESSAGE_FUN_INDEX,
     GET_STACK_TRACE_LINE_INDEX,
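The new DATA_VIEW_FUN_INDEX entry has to be added twice: once in the V(...) macro list that generates the typed accessors, and once in the hand-maintained enum of slot indices. A simplified sketch of how such a list expands (all names below are stand-ins):

    // Simplified sketch: the V(...) list expands once per field to generate
    // accessors, while the enum of slot indices is kept in parallel by hand;
    // that is why the entry above appears in two places.
    #define DEMO_CONTEXT_FIELDS(V) \
      V(DATA_VIEW_FUN_INDEX, data_view_fun)

    enum DemoIndex { DATA_VIEW_FUN_INDEX, DEMO_FIELD_COUNT };

    struct DemoContext {
      void* slots[DEMO_FIELD_COUNT];
    #define DEMO_ACCESSOR(index, name) \
      void* name() { return slots[index]; }
      DEMO_CONTEXT_FIELDS(DEMO_ACCESSOR)
    #undef DEMO_ACCESSOR
    };
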
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index 4272219..109ddd5 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -191,9 +191,8 @@
 
 
 bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
-  if (!events_buffer_.IsEmpty()) {
-    CodeEventsContainer record;
-    events_buffer_.Dequeue(&record);
+  CodeEventsContainer record;
+  if (events_buffer_.Dequeue(&record)) {
     switch (record.generic.type) {
 #define PROFILER_TYPE_CASE(type, clss)                          \
       case CodeEventRecord::type:                               \
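The change above folds the emptiness probe into Dequeue itself, so the consumer acts on Dequeue's return value instead of checking IsEmpty() first. A sketch of that interface under a plain mutex (V8's actual events buffer is a specialized queue, so this only illustrates the bool-returning contract, not the implementation):

    // Sketch of a bool-returning Dequeue; names below are stand-ins.
    #include <deque>
    #include <mutex>

    template <class T>
    class DemoQueue {
     public:
      // Returns false instead of requiring a separate IsEmpty() probe.
      bool Dequeue(T* out) {
        std::lock_guard<std::mutex> lock(mutex_);
        if (items_.empty()) return false;
        *out = items_.front();
        items_.pop_front();
        return true;
      }
      void Enqueue(const T& item) {
        std::lock_guard<std::mutex> lock(mutex_);
        items_.push_back(item);
      }
     private:
      std::mutex mutex_;
      std::deque<T> items_;
    };
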
diff --git a/src/d8.cc b/src/d8.cc
index 4bfdb51..65af987 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -1105,7 +1105,7 @@
   }
   Handle<v8::ArrayBuffer> buffer = ArrayBuffer::New(data, length);
   v8::Persistent<v8::ArrayBuffer> weak_handle(isolate, buffer);
-  weak_handle.MakeWeak(isolate, data, ReadBufferWeakCallback);
+  weak_handle.MakeWeak(data, ReadBufferWeakCallback);
   weak_handle.MarkIndependent();
   isolate->AdjustAmountOfExternalAllocatedMemory(length);
 
diff --git a/src/debug.cc b/src/debug.cc
index 0ffdd00..efb95a0 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -390,6 +390,20 @@
 }
 
 
+bool BreakLocationIterator::IsStepInLocation(Isolate* isolate) {
+  if (RelocInfo::IsConstructCall(rmode())) {
+    return true;
+  } else if (RelocInfo::IsCodeTarget(rmode())) {
+    HandleScope scope(debug_info_->GetIsolate());
+    Address target = rinfo()->target_address();
+    Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
+    return target_code->is_call_stub() || target_code->is_keyed_call_stub();
+  } else {
+    return false;
+  }
+}
+
+
 void BreakLocationIterator::PrepareStepIn(Isolate* isolate) {
   HandleScope scope(isolate);
 
@@ -606,7 +620,7 @@
 void ScriptCache::Add(Handle<Script> script) {
   GlobalHandles* global_handles = Isolate::Current()->global_handles();
   // Create an entry in the hash map for the script.
-  int id = Smi::cast(script->id())->value();
+  int id = script->id()->value();
   HashMap::Entry* entry =
       HashMap::Lookup(reinterpret_cast<void*>(id), Hash(id), true);
   if (entry->value != NULL) {
@@ -674,7 +688,7 @@
   ASSERT((*location)->IsScript());
 
   // Remove the entry from the cache.
-  int id = Smi::cast((*location)->id())->value();
+  int id = (*location)->id()->value();
   script_cache->Remove(reinterpret_cast<void*>(id), Hash(id));
   script_cache->collected_scripts_.Add(id);
 
@@ -2046,13 +2060,30 @@
         if (obj->IsJSFunction()) {
           JSFunction* function = JSFunction::cast(obj);
           SharedFunctionInfo* shared = function->shared();
-          if (shared->allows_lazy_compilation() &&
-              shared->script()->IsScript() &&
-              function->code()->kind() == Code::FUNCTION &&
-              !function->code()->has_debug_break_slots() &&
-              shared->code()->gc_metadata() != active_code_marker) {
+
+          if (!shared->allows_lazy_compilation()) continue;
+          if (!shared->script()->IsScript()) continue;
+          if (shared->code()->gc_metadata() == active_code_marker) continue;
+
+          Code::Kind kind = function->code()->kind();
+          if (kind == Code::FUNCTION &&
+              !function->code()->has_debug_break_slots()) {
             function->set_code(*lazy_compile);
             function->shared()->set_code(*lazy_compile);
+          } else if (kind == Code::BUILTIN &&
+              (function->IsMarkedForInstallingRecompiledCode() ||
+               function->IsInRecompileQueue() ||
+               function->IsMarkedForLazyRecompilation() ||
+               function->IsMarkedForParallelRecompilation())) {
+            // Abort in-flight compilation.
+            Code* shared_code = function->shared()->code();
+            if (shared_code->kind() == Code::FUNCTION &&
+                shared_code->has_debug_break_slots()) {
+              function->set_code(shared_code);
+            } else {
+              function->set_code(*lazy_compile);
+              function->shared()->set_code(*lazy_compile);
+            }
           }
         }
       }
diff --git a/src/debug.h b/src/debug.h
index 467acb9..209d8db 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -97,6 +97,7 @@
   void ClearBreakPoint(Handle<Object> break_point_object);
   void SetOneShot();
   void ClearOneShot();
+  bool IsStepInLocation(Isolate* isolate);
   void PrepareStepIn(Isolate* isolate);
   bool IsExit() const;
   bool HasBreakPoint();
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index be0ea98..c6c098e 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -284,7 +284,7 @@
 void Deoptimizer::VisitAllOptimizedFunctionsForContext(
     Context* context, OptimizedFunctionVisitor* visitor) {
   Isolate* isolate = context->GetIsolate();
-  ZoneScope zone_scope(isolate->runtime_zone(), DELETE_ON_EXIT);
+  Zone zone(isolate);
   DisallowHeapAllocation no_allocation;
 
   ASSERT(context->IsNativeContext());
@@ -293,11 +293,11 @@
 
   // Create a snapshot of the optimized functions list. This is needed because
   // visitors might remove more than one link from the list at once.
-  ZoneList<JSFunction*> snapshot(1, isolate->runtime_zone());
+  ZoneList<JSFunction*> snapshot(1, &zone);
   Object* element = context->OptimizedFunctionsListHead();
   while (!element->IsUndefined()) {
     JSFunction* element_function = JSFunction::cast(element);
-    snapshot.Add(element_function, isolate->runtime_zone());
+    snapshot.Add(element_function, &zone);
     element = element_function->next_function_link();
   }
 
@@ -420,11 +420,10 @@
   Context* context = function->context()->native_context();
   Isolate* isolate = context->GetIsolate();
   Object* undefined = isolate->heap()->undefined_value();
-  Zone* zone = isolate->runtime_zone();
-  ZoneScope zone_scope(zone, DELETE_ON_EXIT);
-  ZoneList<Code*> codes(1, zone);
+  Zone zone(isolate);
+  ZoneList<Code*> codes(1, &zone);
   DeoptimizeWithMatchingCodeFilter filter(code);
-  PartitionOptimizedFunctions(context, &filter, &codes, zone, undefined);
+  PartitionOptimizedFunctions(context, &filter, &codes, &zone, undefined);
   ASSERT_EQ(1, codes.length());
   DeoptimizeFunctionWithPreparedFunctionList(
       JSFunction::cast(codes.at(0)->deoptimizing_functions()));
@@ -437,10 +436,9 @@
   ASSERT(context->IsNativeContext());
   Isolate* isolate = context->GetIsolate();
   Object* undefined = isolate->heap()->undefined_value();
-  Zone* zone = isolate->runtime_zone();
-  ZoneScope zone_scope(zone, DELETE_ON_EXIT);
-  ZoneList<Code*> codes(1, zone);
-  PartitionOptimizedFunctions(context, filter, &codes, zone, undefined);
+  Zone zone(isolate);
+  ZoneList<Code*> codes(1, &zone);
+  PartitionOptimizedFunctions(context, filter, &codes, &zone, undefined);
   for (int i = 0; i < codes.length(); ++i) {
     DeoptimizeFunctionWithPreparedFunctionList(
         JSFunction::cast(codes.at(i)->deoptimizing_functions()));
@@ -547,6 +545,7 @@
   if (function != NULL && function->IsOptimized()) {
     function->shared()->increment_deopt_count();
     if (bailout_type_ == Deoptimizer::SOFT) {
+      isolate->counters()->soft_deopts_executed()->Increment();
       // Soft deopts shouldn't count against the overall re-optimization count
       // that can eventually lead to disabling optimization for a function.
       int opt_count = function->shared()->opt_count();
@@ -788,7 +787,6 @@
       case Translation::DOUBLE_STACK_SLOT:
       case Translation::LITERAL:
       case Translation::ARGUMENTS_OBJECT:
-      case Translation::DUPLICATE:
       default:
         UNREACHABLE();
         break;
@@ -1511,8 +1509,8 @@
   }
 
   output_frame_offset -= kPointerSize;
-  value = frame_ptr - (output_frame_size - output_frame_offset) -
-      StandardFrameConstants::kMarkerOffset + kPointerSize;
+  value = frame_ptr + StandardFrameConstants::kCallerSPOffset -
+      (output_frame_size - output_frame_offset) + kPointerSize;
   output_frame->SetFrameSlot(output_frame_offset, value);
   if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
@@ -1724,14 +1722,8 @@
   disasm::NameConverter converter;
   Address object_slot = deferred_objects_.last().slot_address();
 
-  // Ignore commands marked as duplicate and act on the first non-duplicate.
   Translation::Opcode opcode =
       static_cast<Translation::Opcode>(iterator->Next());
-  while (opcode == Translation::DUPLICATE) {
-    opcode = static_cast<Translation::Opcode>(iterator->Next());
-    iterator->Skip(Translation::NumberOfOperandsFor(opcode));
-    opcode = static_cast<Translation::Opcode>(iterator->Next());
-  }
 
   switch (opcode) {
     case Translation::BEGIN:
@@ -1742,7 +1734,6 @@
     case Translation::SETTER_STUB_FRAME:
     case Translation::COMPILED_STUB_FRAME:
     case Translation::ARGUMENTS_OBJECT:
-    case Translation::DUPLICATE:
       UNREACHABLE();
       return;
 
@@ -1925,14 +1916,8 @@
   const intptr_t kPlaceholder = reinterpret_cast<intptr_t>(Smi::FromInt(0));
   bool is_native = value_type == TRANSLATED_VALUE_IS_NATIVE;
 
-  // Ignore commands marked as duplicate and act on the first non-duplicate.
   Translation::Opcode opcode =
       static_cast<Translation::Opcode>(iterator->Next());
-  while (opcode == Translation::DUPLICATE) {
-    opcode = static_cast<Translation::Opcode>(iterator->Next());
-    iterator->Skip(Translation::NumberOfOperandsFor(opcode));
-    opcode = static_cast<Translation::Opcode>(iterator->Next());
-  }
 
   switch (opcode) {
     case Translation::BEGIN:
@@ -1942,7 +1927,6 @@
     case Translation::GETTER_STUB_FRAME:
     case Translation::SETTER_STUB_FRAME:
     case Translation::COMPILED_STUB_FRAME:
-    case Translation::DUPLICATE:
       UNREACHABLE();
       return;
 
@@ -2196,10 +2180,6 @@
 
   Translation::Opcode opcode =
       static_cast<Translation::Opcode>(iterator->Next());
-  bool duplicate = (opcode == Translation::DUPLICATE);
-  if (duplicate) {
-    opcode = static_cast<Translation::Opcode>(iterator->Next());
-  }
 
   switch (opcode) {
     case Translation::BEGIN:
@@ -2209,21 +2189,20 @@
     case Translation::GETTER_STUB_FRAME:
     case Translation::SETTER_STUB_FRAME:
     case Translation::COMPILED_STUB_FRAME:
-    case Translation::DUPLICATE:
       UNREACHABLE();  // Malformed input.
-       return false;
+      return false;
 
-     case Translation::REGISTER: {
-       int output_reg = iterator->Next();
-       if (FLAG_trace_osr) {
-         PrintF("    %s <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
-                converter.NameOfCPURegister(output_reg),
-                input_value,
-                *input_offset);
-       }
-       output->SetRegister(output_reg, input_value);
-       break;
-     }
+    case Translation::REGISTER: {
+      int output_reg = iterator->Next();
+      if (FLAG_trace_osr) {
+        PrintF("    %s <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
+               converter.NameOfCPURegister(output_reg),
+               input_value,
+               *input_offset);
+      }
+      output->SetRegister(output_reg, input_value);
+      break;
+    }
 
     case Translation::INT32_REGISTER: {
       int32_t int32_value = 0;
@@ -2368,7 +2347,7 @@
     }
   }
 
-  if (!duplicate) *input_offset -= kPointerSize;
+  *input_offset -= kPointerSize;
   return true;
 }
 
@@ -2822,15 +2801,18 @@
 }
 
 
-void Translation::MarkDuplicate() {
-  buffer_->Add(DUPLICATE, zone());
+void Translation::StoreArgumentsObject(bool args_known,
+                                       int args_index,
+                                       int args_length) {
+  buffer_->Add(ARGUMENTS_OBJECT, zone());
+  buffer_->Add(args_known, zone());
+  buffer_->Add(args_index, zone());
+  buffer_->Add(args_length, zone());
 }
 
 
 int Translation::NumberOfOperandsFor(Opcode opcode) {
   switch (opcode) {
-    case DUPLICATE:
-      return 0;
     case GETTER_STUB_FRAME:
     case SETTER_STUB_FRAME:
     case ARGUMENTS_OBJECT:
@@ -2895,8 +2877,6 @@
       return "LITERAL";
     case ARGUMENTS_OBJECT:
       return "ARGUMENTS_OBJECT";
-    case DUPLICATE:
-      return "DUPLICATE";
   }
   UNREACHABLE();
   return "";
@@ -2948,7 +2928,6 @@
     case Translation::INT32_REGISTER:
     case Translation::UINT32_REGISTER:
     case Translation::DOUBLE_REGISTER:
-    case Translation::DUPLICATE:
       // We are at safepoint which corresponds to call.  All registers are
       // saved by caller so there would be no live registers at this
       // point. Thus these translation commands should not be used.
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 5ca635c..d28be23 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -713,11 +713,7 @@
     INT32_STACK_SLOT,
     UINT32_STACK_SLOT,
     DOUBLE_STACK_SLOT,
-    LITERAL,
-
-    // A prefix indicating that the next command is a duplicate of the one
-    // that follows it.
-    DUPLICATE
+    LITERAL
   };
 
   Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count,
@@ -749,7 +745,7 @@
   void StoreUint32StackSlot(int index);
   void StoreDoubleStackSlot(int index);
   void StoreLiteral(int literal_id);
-  void MarkDuplicate();
+  void StoreArgumentsObject(bool args_known, int args_index, int args_length);
 
   Zone* zone() const { return zone_; }
 
diff --git a/src/factory.cc b/src/factory.cc
index 8b842a7..673bb23 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -259,6 +259,32 @@
 }
 
 
+template<typename SinkChar, typename StringType>
+Handle<String> ConcatStringContent(Handle<StringType> result,
+                                   Handle<String> first,
+                                   Handle<String> second) {
+  DisallowHeapAllocation pointer_stays_valid;
+  SinkChar* sink = result->GetChars();
+  String::WriteToFlat(*first, sink, 0, first->length());
+  String::WriteToFlat(*second, sink + first->length(), 0, second->length());
+  return result;
+}
+
+
+Handle<String> Factory::NewFlatConcatString(Handle<String> first,
+                                            Handle<String> second) {
+  int total_length = first->length() + second->length();
+  if (first->IsOneByteRepresentationUnderneath() &&
+      second->IsOneByteRepresentationUnderneath()) {
+    return ConcatStringContent<uint8_t>(
+        NewRawOneByteString(total_length), first, second);
+  } else {
+    return ConcatStringContent<uc16>(
+        NewRawTwoByteString(total_length), first, second);
+  }
+}
+
+
 Handle<String> Factory::NewSubString(Handle<String> str,
                                      int begin,
                                      int end) {
@@ -408,27 +434,17 @@
 
 Handle<Script> Factory::NewScript(Handle<String> source) {
   // Generate id for this script.
-  int id;
   Heap* heap = isolate()->heap();
-  if (heap->last_script_id()->IsUndefined()) {
-    // Script ids start from one.
-    id = 1;
-  } else {
-    // Increment id, wrap when positive smi is exhausted.
-    id = Smi::cast(heap->last_script_id())->value();
-    id++;
-    if (!Smi::IsValid(id)) {
-      id = 0;
-    }
-  }
-  heap->SetLastScriptId(Smi::FromInt(id));
+  int id = heap->last_script_id()->value() + 1;
+  if (!Smi::IsValid(id) || id < 0) id = 1;
+  heap->set_last_script_id(Smi::FromInt(id));
 
   // Create and initialize script object.
   Handle<Foreign> wrapper = NewForeign(0, TENURED);
   Handle<Script> script = Handle<Script>::cast(NewStruct(SCRIPT_TYPE));
   script->set_source(*source);
   script->set_name(heap->undefined_value());
-  script->set_id(heap->last_script_id());
+  script->set_id(Smi::FromInt(id));
   script->set_line_offset(Smi::FromInt(0));
   script->set_column_offset(Smi::FromInt(0));
   script->set_data(heap->undefined_value());
@@ -1080,6 +1096,16 @@
 }
 
 
+Handle<JSDataView> Factory::NewJSDataView() {
+  JSFunction* data_view_fun =
+      isolate()->context()->native_context()->data_view_fun();
+  CALL_HEAP_FUNCTION(
+      isolate(),
+      isolate()->heap()->AllocateJSObject(data_view_fun),
+      JSDataView);
+}
+
+
 Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type) {
   JSFunction* typed_array_fun;
   Context* native_context = isolate()->context()->native_context();
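Script ids are now allocated by incrementing the heap's last id and wrapping back to 1 once the positive Smi range is exhausted, so 0 is never handed out. A standalone sketch of the same wrap-around scheme, with kMaxValidId standing in for the platform-dependent Smi limit:

    // Sketch of the wrap-around id scheme used above; kMaxValidId stands in
    // for the platform-dependent Smi::IsValid() limit.
    const int kMaxValidId = (1 << 30) - 1;  // hypothetical limit

    int NextScriptId(int* last_id) {
      int id = *last_id + 1;
      // Wrap when the positive range is exhausted; script ids start at 1.
      if (id > kMaxValidId || id < 0) id = 1;
      *last_id = id;
      return id;
    }
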
diff --git a/src/factory.h b/src/factory.h
index cde8432..0cb7157 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -152,6 +152,10 @@
   Handle<String> NewConsString(Handle<String> first,
                                Handle<String> second);
 
+  // Create a new sequential string containing the concatenation of the inputs.
+  Handle<String> NewFlatConcatString(Handle<String> first,
+                                     Handle<String> second);
+
   // Create a new string object which holds a substring of a string.
   Handle<String> NewSubString(Handle<String> str,
                               int begin,
@@ -328,6 +332,8 @@
 
   Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type);
 
+  Handle<JSDataView> NewJSDataView();
+
   Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
 
   // Change the type of the argument into a JS object/function and reinitialize.
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 179dea4..91186de 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -195,9 +195,6 @@
             true,
             "Optimize object size, Array shift, DOM strings and string +")
 DEFINE_bool(pretenuring, true, "allocate objects in old space")
-// TODO(hpayer): We will remove this flag as soon as we have pretenuring
-// support for specific allocation sites.
-DEFINE_bool(pretenuring_call_new, false, "pretenure call new")
 DEFINE_bool(track_fields, true, "track fields with only smi values")
 DEFINE_bool(track_double_fields, true, "track fields with double values")
 DEFINE_bool(track_heap_object_fields, true, "track fields with heap values")
@@ -267,8 +264,6 @@
             "eliminate unreachable code (hidden behind soft deopts)")
 DEFINE_bool(track_allocation_sites, true,
             "Use allocation site info to reduce transitions")
-DEFINE_bool(optimize_constructed_arrays, true,
-            "Use allocation site info on constructed arrays")
 DEFINE_bool(trace_osr, false, "trace on-stack replacement")
 DEFINE_int(stress_runs, 0, "number of stress runs")
 DEFINE_bool(optimize_closures, true, "optimize closures")
@@ -290,10 +285,10 @@
             "allow uint32 values on optimize frames if they are used only in "
             "safe operations")
 
-DEFINE_bool(parallel_recompilation, false,
+DEFINE_bool(parallel_recompilation, true,
             "optimizing hot functions asynchronously on a separate thread")
 DEFINE_bool(trace_parallel_recompilation, false, "track parallel recompilation")
-DEFINE_int(parallel_recompilation_queue_length, 3,
+DEFINE_int(parallel_recompilation_queue_length, 8,
            "the length of the parallel compilation queue")
 DEFINE_int(parallel_recompilation_delay, 0,
            "artificial compilation delay in ms")
@@ -399,6 +394,7 @@
 DEFINE_bool(trace_opt_stats, false, "trace lazy optimization statistics")
 DEFINE_bool(opt, true, "use adaptive optimizations")
 DEFINE_bool(always_opt, false, "always try to optimize functions")
+DEFINE_bool(always_osr, false, "always try to OSR functions")
 DEFINE_bool(prepare_always_opt, false, "prepare for turning on always opt")
 DEFINE_bool(trace_deopt, false, "trace optimize function deoptimization")
 DEFINE_bool(trace_stub_failures, false,
@@ -480,7 +476,7 @@
 DEFINE_bool(trace_code_flushing, false, "trace code flushing progress")
 DEFINE_bool(age_code, true,
             "track un-executed functions to age code and flush only "
-            "old code")
+            "old code (required for code flushing)")
 DEFINE_bool(incremental_marking, true, "use incremental marking")
 DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
 DEFINE_bool(trace_incremental_marking, false,
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 1228ccf..8d10645 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -116,7 +116,7 @@
 }
 
 
-inline StackFrame::StackFrame(StackFrameIterator* iterator)
+inline StackFrame::StackFrame(StackFrameIteratorBase* iterator)
     : iterator_(iterator), isolate_(iterator_->isolate()) {
 }
 
@@ -136,22 +136,34 @@
 }
 
 
-inline EntryFrame::EntryFrame(StackFrameIterator* iterator)
+inline Address* StackFrame::ResolveReturnAddressLocation(Address* pc_address) {
+  if (return_address_location_resolver_ == NULL) {
+    return pc_address;
+  } else {
+    return reinterpret_cast<Address*>(
+        return_address_location_resolver_(
+            reinterpret_cast<uintptr_t>(pc_address)));
+  }
+}
+
+
+inline EntryFrame::EntryFrame(StackFrameIteratorBase* iterator)
     : StackFrame(iterator) {
 }
 
 
-inline EntryConstructFrame::EntryConstructFrame(StackFrameIterator* iterator)
+inline EntryConstructFrame::EntryConstructFrame(
+    StackFrameIteratorBase* iterator)
     : EntryFrame(iterator) {
 }
 
 
-inline ExitFrame::ExitFrame(StackFrameIterator* iterator)
+inline ExitFrame::ExitFrame(StackFrameIteratorBase* iterator)
     : StackFrame(iterator) {
 }
 
 
-inline StandardFrame::StandardFrame(StackFrameIterator* iterator)
+inline StandardFrame::StandardFrame(StackFrameIteratorBase* iterator)
     : StackFrame(iterator) {
 }
 
@@ -201,7 +213,7 @@
 }
 
 
-inline JavaScriptFrame::JavaScriptFrame(StackFrameIterator* iterator)
+inline JavaScriptFrame::JavaScriptFrame(StackFrameIteratorBase* iterator)
     : StandardFrame(iterator) {
 }
 
@@ -269,54 +281,51 @@
 }
 
 
-inline StubFrame::StubFrame(StackFrameIterator* iterator)
+inline StubFrame::StubFrame(StackFrameIteratorBase* iterator)
     : StandardFrame(iterator) {
 }
 
 
-inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
+inline OptimizedFrame::OptimizedFrame(StackFrameIteratorBase* iterator)
     : JavaScriptFrame(iterator) {
 }
 
 
 inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
-    StackFrameIterator* iterator) : JavaScriptFrame(iterator) {
+    StackFrameIteratorBase* iterator) : JavaScriptFrame(iterator) {
 }
 
 
-inline InternalFrame::InternalFrame(StackFrameIterator* iterator)
+inline InternalFrame::InternalFrame(StackFrameIteratorBase* iterator)
     : StandardFrame(iterator) {
 }
 
 
 inline StubFailureTrampolineFrame::StubFailureTrampolineFrame(
-    StackFrameIterator* iterator) : StandardFrame(iterator) {
+    StackFrameIteratorBase* iterator) : StandardFrame(iterator) {
 }
 
 
-inline ConstructFrame::ConstructFrame(StackFrameIterator* iterator)
+inline ConstructFrame::ConstructFrame(StackFrameIteratorBase* iterator)
     : InternalFrame(iterator) {
 }
 
 
-template<typename Iterator>
-inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
+inline JavaScriptFrameIterator::JavaScriptFrameIterator(
     Isolate* isolate)
     : iterator_(isolate) {
   if (!done()) Advance();
 }
 
 
-template<typename Iterator>
-inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
+inline JavaScriptFrameIterator::JavaScriptFrameIterator(
     Isolate* isolate, ThreadLocalTop* top)
     : iterator_(isolate, top) {
   if (!done()) Advance();
 }
 
 
-template<typename Iterator>
-inline JavaScriptFrame* JavaScriptFrameIteratorTemp<Iterator>::frame() const {
+inline JavaScriptFrame* JavaScriptFrameIterator::frame() const {
   // TODO(1233797): The frame hierarchy needs to change. It's
   // problematic that we can't use the safe-cast operator to cast to
   // the JavaScript frame type, because we may encounter arguments
@@ -327,43 +336,10 @@
 }
 
 
-template<typename Iterator>
-JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
-    Isolate* isolate, StackFrame::Id id)
-    : iterator_(isolate) {
-  AdvanceToId(id);
-}
-
-
-template<typename Iterator>
-void JavaScriptFrameIteratorTemp<Iterator>::Advance() {
-  do {
-    iterator_.Advance();
-  } while (!iterator_.done() && !iterator_.frame()->is_java_script());
-}
-
-
-template<typename Iterator>
-void JavaScriptFrameIteratorTemp<Iterator>::AdvanceToArgumentsFrame() {
-  if (!frame()->has_adapted_arguments()) return;
-  iterator_.Advance();
-  ASSERT(iterator_.frame()->is_arguments_adaptor());
-}
-
-
-template<typename Iterator>
-void JavaScriptFrameIteratorTemp<Iterator>::AdvanceToId(StackFrame::Id id) {
-  while (!done()) {
-    Advance();
-    if (frame()->id() == id) return;
-  }
-}
-
-
-template<typename Iterator>
-void JavaScriptFrameIteratorTemp<Iterator>::Reset() {
-  iterator_.Reset();
-  if (!done()) Advance();
+inline JavaScriptFrame* SafeStackFrameIterator::frame() const {
+  ASSERT(!done());
+  ASSERT(frame_->is_java_script());
+  return static_cast<JavaScriptFrame*>(frame_);
 }
 
 
diff --git a/src/frames.cc b/src/frames.cc
index 53f5108..edd5ddd 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -43,19 +43,8 @@
 namespace internal {
 
 
-static ReturnAddressLocationResolver return_address_location_resolver = NULL;
-
-
-// Resolves pc_address through the resolution address function if one is set.
-static inline Address* ResolveReturnAddressLocation(Address* pc_address) {
-  if (return_address_location_resolver == NULL) {
-    return pc_address;
-  } else {
-    return reinterpret_cast<Address*>(
-        return_address_location_resolver(
-            reinterpret_cast<uintptr_t>(pc_address)));
-  }
-}
+ReturnAddressLocationResolver
+    StackFrame::return_address_location_resolver_ = NULL;
 
 
 // Iterator that supports traversing the stack handlers of a
@@ -88,39 +77,29 @@
 
 
 #define INITIALIZE_SINGLETON(type, field) field##_(this),
-StackFrameIterator::StackFrameIterator(Isolate* isolate)
+StackFrameIteratorBase::StackFrameIteratorBase(Isolate* isolate,
+                                               bool can_access_heap_objects)
     : isolate_(isolate),
       STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
       frame_(NULL), handler_(NULL),
-      thread_(isolate_->thread_local_top()),
-      fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
-  Reset();
+      can_access_heap_objects_(can_access_heap_objects) {
 }
-StackFrameIterator::StackFrameIterator(Isolate* isolate, ThreadLocalTop* t)
-    : isolate_(isolate),
-      STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
-      frame_(NULL), handler_(NULL), thread_(t),
-      fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
-  Reset();
-}
-StackFrameIterator::StackFrameIterator(Isolate* isolate,
-                                       bool use_top, Address fp, Address sp)
-    : isolate_(isolate),
-      STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
-      frame_(NULL), handler_(NULL),
-      thread_(use_top ? isolate_->thread_local_top() : NULL),
-      fp_(use_top ? NULL : fp), sp_(sp),
-      advance_(use_top ? &StackFrameIterator::AdvanceWithHandler :
-               &StackFrameIterator::AdvanceWithoutHandler) {
-  if (use_top || fp != NULL) {
-    Reset();
-  }
-}
-
 #undef INITIALIZE_SINGLETON
 
 
-void StackFrameIterator::AdvanceWithHandler() {
+StackFrameIterator::StackFrameIterator(Isolate* isolate)
+    : StackFrameIteratorBase(isolate, true) {
+  Reset(isolate->thread_local_top());
+}
+
+
+StackFrameIterator::StackFrameIterator(Isolate* isolate, ThreadLocalTop* t)
+    : StackFrameIteratorBase(isolate, true) {
+  Reset(t);
+}
+
+
+void StackFrameIterator::Advance() {
   ASSERT(!done());
   // Compute the state of the calling frame before restoring
   // callee-saved registers and unwinding handlers. This allows the
@@ -143,37 +122,17 @@
 }
 
 
-void StackFrameIterator::AdvanceWithoutHandler() {
-  // A simpler version of Advance which doesn't care about handler.
-  ASSERT(!done());
+void StackFrameIterator::Reset(ThreadLocalTop* top) {
   StackFrame::State state;
-  StackFrame::Type type = frame_->GetCallerState(&state);
-  frame_ = SingletonFor(type, &state);
-}
-
-
-void StackFrameIterator::Reset() {
-  StackFrame::State state;
-  StackFrame::Type type;
-  if (thread_ != NULL) {
-    type = ExitFrame::GetStateForFramePointer(
-        Isolate::c_entry_fp(thread_), &state);
-    handler_ = StackHandler::FromAddress(
-        Isolate::handler(thread_));
-  } else {
-    ASSERT(fp_ != NULL);
-    state.fp = fp_;
-    state.sp = sp_;
-    state.pc_address = ResolveReturnAddressLocation(
-        reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp_)));
-    type = StackFrame::ComputeType(isolate(), &state);
-  }
+  StackFrame::Type type = ExitFrame::GetStateForFramePointer(
+      Isolate::c_entry_fp(top), &state);
+  handler_ = StackHandler::FromAddress(Isolate::handler(top));
   if (SingletonFor(type) == NULL) return;
   frame_ = SingletonFor(type, &state);
 }
 
 
-StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type,
+StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type,
                                              StackFrame::State* state) {
   if (type == StackFrame::NONE) return NULL;
   StackFrame* result = SingletonFor(type);
@@ -183,7 +142,7 @@
 }
 
 
-StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type) {
+StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type) {
 #define FRAME_TYPE_CASE(type, field) \
   case StackFrame::type: result = &field##_; break;
 
@@ -202,6 +161,33 @@
 // -------------------------------------------------------------------------
 
 
+JavaScriptFrameIterator::JavaScriptFrameIterator(
+    Isolate* isolate, StackFrame::Id id)
+    : iterator_(isolate) {
+  while (!done()) {
+    Advance();
+    if (frame()->id() == id) return;
+  }
+}
+
+
+void JavaScriptFrameIterator::Advance() {
+  do {
+    iterator_.Advance();
+  } while (!iterator_.done() && !iterator_.frame()->is_java_script());
+}
+
+
+void JavaScriptFrameIterator::AdvanceToArgumentsFrame() {
+  if (!frame()->has_adapted_arguments()) return;
+  iterator_.Advance();
+  ASSERT(iterator_.frame()->is_arguments_adaptor());
+}
+
+
+// -------------------------------------------------------------------------
+
+
 StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate)
     : JavaScriptFrameIterator(isolate) {
   if (!done() && !IsValidFrame()) Advance();
@@ -228,85 +214,61 @@
 // -------------------------------------------------------------------------
 
 
-bool SafeStackFrameIterator::ExitFrameValidator::IsValidFP(Address fp) {
-  if (!validator_.IsValid(fp)) return false;
-  Address sp = ExitFrame::ComputeStackPointer(fp);
-  if (!validator_.IsValid(sp)) return false;
-  StackFrame::State state;
-  ExitFrame::FillState(fp, sp, &state);
-  if (!validator_.IsValid(reinterpret_cast<Address>(state.pc_address))) {
-    return false;
-  }
-  return *state.pc_address != NULL;
-}
-
-
-SafeStackFrameIterator::ActiveCountMaintainer::ActiveCountMaintainer(
-    Isolate* isolate)
-    : isolate_(isolate) {
-  isolate_->set_safe_stack_iterator_counter(
-      isolate_->safe_stack_iterator_counter() + 1);
-}
-
-
-SafeStackFrameIterator::ActiveCountMaintainer::~ActiveCountMaintainer() {
-  isolate_->set_safe_stack_iterator_counter(
-      isolate_->safe_stack_iterator_counter() - 1);
-}
-
-
 SafeStackFrameIterator::SafeStackFrameIterator(
     Isolate* isolate,
     Address fp, Address sp, Address low_bound, Address high_bound) :
-    maintainer_(isolate),
-    stack_validator_(low_bound, high_bound),
-    is_valid_top_(IsValidTop(isolate, low_bound, high_bound)),
-    is_valid_fp_(IsWithinBounds(low_bound, high_bound, fp)),
-    is_working_iterator_(is_valid_top_ || is_valid_fp_),
-    iteration_done_(!is_working_iterator_),
-    iterator_(isolate, is_valid_top_, is_valid_fp_ ? fp : NULL, sp) {
-}
-
-bool SafeStackFrameIterator::is_active(Isolate* isolate) {
-  return isolate->safe_stack_iterator_counter() > 0;
-}
-
-
-bool SafeStackFrameIterator::IsValidTop(Isolate* isolate,
-                                        Address low_bound, Address high_bound) {
+    StackFrameIteratorBase(isolate, false),
+    low_bound_(low_bound), high_bound_(high_bound) {
+  StackFrame::State state;
+  StackFrame::Type type;
   ThreadLocalTop* top = isolate->thread_local_top();
+  if (IsValidTop(top)) {
+    type = ExitFrame::GetStateForFramePointer(Isolate::c_entry_fp(top), &state);
+  } else if (IsValidStackAddress(fp)) {
+    ASSERT(fp != NULL);
+    state.fp = fp;
+    state.sp = sp;
+    state.pc_address = StackFrame::ResolveReturnAddressLocation(
+        reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp)));
+    type = StackFrame::ComputeType(this, &state);
+  } else {
+    return;
+  }
+  if (SingletonFor(type) == NULL) return;
+  frame_ = SingletonFor(type, &state);
+
+  if (!done()) Advance();
+}
+
+
+bool SafeStackFrameIterator::IsValidTop(ThreadLocalTop* top) const {
   Address fp = Isolate::c_entry_fp(top);
-  ExitFrameValidator validator(low_bound, high_bound);
-  if (!validator.IsValidFP(fp)) return false;
+  if (!IsValidExitFrame(fp)) return false;
+  // There should be at least one JS_ENTRY stack handler.
   return Isolate::handler(top) != NULL;
 }
 
 
-void SafeStackFrameIterator::Advance() {
-  ASSERT(is_working_iterator_);
+void SafeStackFrameIterator::AdvanceOneFrame() {
   ASSERT(!done());
-  StackFrame* last_frame = iterator_.frame();
+  StackFrame* last_frame = frame_;
   Address last_sp = last_frame->sp(), last_fp = last_frame->fp();
-  // Before advancing to the next stack frame, perform pointer validity tests
-  iteration_done_ = !IsValidFrame(last_frame) ||
-      !CanIterateHandles(last_frame, iterator_.handler()) ||
-      !IsValidCaller(last_frame);
-  if (iteration_done_) return;
+  // Before advancing to the next stack frame, perform pointer validity tests.
+  if (!IsValidFrame(last_frame) || !IsValidCaller(last_frame)) {
+    frame_ = NULL;
+    return;
+  }
 
-  iterator_.Advance();
-  if (iterator_.done()) return;
-  // Check that we have actually moved to the previous frame in the stack
-  StackFrame* prev_frame = iterator_.frame();
-  iteration_done_ = prev_frame->sp() < last_sp || prev_frame->fp() < last_fp;
-}
+  // Advance to the previous frame.
+  StackFrame::State state;
+  StackFrame::Type type = frame_->GetCallerState(&state);
+  frame_ = SingletonFor(type, &state);
+  if (frame_ == NULL) return;
 
-
-bool SafeStackFrameIterator::CanIterateHandles(StackFrame* frame,
-                                               StackHandler* handler) {
-  // If StackIterator iterates over StackHandles, verify that
-  // StackHandlerIterator can be instantiated (see StackHandlerIterator
-  // constructor.)
-  return !is_valid_top_ || (frame->sp() <= handler->address());
+  // Check that we have actually moved to the previous frame in the stack.
+  if (frame_->sp() < last_sp || frame_->fp() < last_fp) {
+    frame_ = NULL;
+  }
 }
 
 
@@ -323,8 +285,7 @@
     // sure that caller FP address is valid.
     Address caller_fp = Memory::Address_at(
         frame->fp() + EntryFrameConstants::kCallerFPOffset);
-    ExitFrameValidator validator(stack_validator_);
-    if (!validator.IsValidFP(caller_fp)) return false;
+    if (!IsValidExitFrame(caller_fp)) return false;
   } else if (frame->is_arguments_adaptor()) {
     // See ArgumentsAdaptorFrame::GetCallerStackPointer. It assumes that
     // the number of arguments is stored on stack as Smi. We need to check
@@ -337,14 +298,28 @@
   }
   frame->ComputeCallerState(&state);
   return IsValidStackAddress(state.sp) && IsValidStackAddress(state.fp) &&
-      iterator_.SingletonFor(frame->GetCallerState(&state)) != NULL;
+      SingletonFor(frame->GetCallerState(&state)) != NULL;
 }
 
 
-void SafeStackFrameIterator::Reset() {
-  if (is_working_iterator_) {
-    iterator_.Reset();
-    iteration_done_ = false;
+bool SafeStackFrameIterator::IsValidExitFrame(Address fp) const {
+  if (!IsValidStackAddress(fp)) return false;
+  Address sp = ExitFrame::ComputeStackPointer(fp);
+  if (!IsValidStackAddress(sp)) return false;
+  StackFrame::State state;
+  ExitFrame::FillState(fp, sp, &state);
+  if (!IsValidStackAddress(reinterpret_cast<Address>(state.pc_address))) {
+    return false;
+  }
+  return *state.pc_address != NULL;
+}
+
+
+void SafeStackFrameIterator::Advance() {
+  while (true) {
+    AdvanceOneFrame();
+    if (done()) return;
+    if (frame_->is_java_script()) return;
   }
 }
 
@@ -352,23 +327,6 @@
 // -------------------------------------------------------------------------
 
 
-SafeStackTraceFrameIterator::SafeStackTraceFrameIterator(
-    Isolate* isolate,
-    Address fp, Address sp, Address low_bound, Address high_bound) :
-    SafeJavaScriptFrameIterator(isolate, fp, sp, low_bound, high_bound) {
-  if (!done() && !frame()->is_java_script()) Advance();
-}
-
-
-void SafeStackTraceFrameIterator::Advance() {
-  while (true) {
-    SafeJavaScriptFrameIterator::Advance();
-    if (done()) return;
-    if (frame()->is_java_script()) return;
-  }
-}
-
-
 Code* StackFrame::GetSafepointData(Isolate* isolate,
                                    Address inner_pointer,
                                    SafepointEntry* safepoint_entry,
@@ -420,12 +378,13 @@
 
 void StackFrame::SetReturnAddressLocationResolver(
     ReturnAddressLocationResolver resolver) {
-  ASSERT(return_address_location_resolver == NULL);
-  return_address_location_resolver = resolver;
+  ASSERT(return_address_location_resolver_ == NULL);
+  return_address_location_resolver_ = resolver;
 }
 
 
-StackFrame::Type StackFrame::ComputeType(Isolate* isolate, State* state) {
+StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
+                                         State* state) {
   ASSERT(state->fp != NULL);
   if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
     return ARGUMENTS_ADAPTOR;
@@ -440,8 +399,9 @@
     // frames as normal JavaScript frames to avoid having to look
     // into the heap to determine the state. This is safe as long
     // as nobody tries to GC...
-    if (SafeStackFrameIterator::is_active(isolate)) return JAVA_SCRIPT;
-    Code::Kind kind = GetContainingCode(isolate, *(state->pc_address))->kind();
+    if (!iterator->can_access_heap_objects_) return JAVA_SCRIPT;
+    Code::Kind kind = GetContainingCode(iterator->isolate(),
+                                        *(state->pc_address))->kind();
     ASSERT(kind == Code::FUNCTION || kind == Code::OPTIMIZED_FUNCTION);
     return (kind == Code::OPTIMIZED_FUNCTION) ? OPTIMIZED : JAVA_SCRIPT;
   }
@@ -449,10 +409,16 @@
 }
 
 
+#ifdef DEBUG
+bool StackFrame::can_access_heap_objects() const {
+  return iterator_->can_access_heap_objects_;
+}
+#endif
+
 
 StackFrame::Type StackFrame::GetCallerState(State* state) const {
   ComputeCallerState(state);
-  return ComputeType(isolate(), state);
+  return ComputeType(iterator_, state);
 }
 
 
@@ -545,6 +511,11 @@
 }
 
 
+Address ExitFrame::ComputeStackPointer(Address fp) {
+  return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
+}
+
+
 void ExitFrame::FillState(Address fp, Address sp, State* state) {
   state->sp = sp;
   state->fp = fp;
@@ -607,7 +578,7 @@
 void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
   // Make sure that we're not doing "safe" stack frame iteration. We cannot
   // possibly find pointers in optimized frames in that state.
-  ASSERT(!SafeStackFrameIterator::is_active(isolate()));
+  ASSERT(can_access_heap_objects());
 
   // Compute the safepoint information.
   unsigned stack_slots = 0;
@@ -734,12 +705,12 @@
 
 Code* JavaScriptFrame::unchecked_code() const {
   JSFunction* function = JSFunction::cast(this->function());
-  return function->unchecked_code();
+  return function->code();
 }
 
 
 int JavaScriptFrame::GetNumberOfIncomingArguments() const {
-  ASSERT(!SafeStackFrameIterator::is_active(isolate()) &&
+  ASSERT(can_access_heap_objects() &&
          isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
 
   JSFunction* function = JSFunction::cast(this->function());
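
A note for readers of the SafeStackFrameIterator rewrite above: its safety rests on two defensive checks, an inclusive bounds test against the sampled stack ([low_bound_, high_bound_]) and a monotonicity test that each caller frame sits at a higher address than its callee. A minimal standalone sketch of that pattern follows; the Frame struct, the frame layout, and CountFramesSafely are invented for illustration and are not V8 types, and the dereference is only meaningful when pointed at a real frame chain.

#include <cstdint>

// Hypothetical frame record: fp points at the saved caller fp, sp is the
// stack pointer belonging to this frame. Stacks grow downwards here.
struct Frame {
  uintptr_t sp;
  uintptr_t fp;
};

// Inclusive bounds test, mirroring IsValidStackAddress in the patch.
static bool WithinBounds(uintptr_t low, uintptr_t high, uintptr_t addr) {
  return low <= addr && addr <= high;
}

// Walk at most `limit` frames, giving up as soon as a pointer looks bogus.
static int CountFramesSafely(Frame f, uintptr_t low, uintptr_t high,
                             int limit) {
  int count = 0;
  while (count < limit) {
    if (!WithinBounds(low, high, f.sp) ||
        !WithinBounds(low, high, f.fp)) {
      break;
    }
    ++count;
    Frame caller;
    caller.fp = *reinterpret_cast<uintptr_t*>(f.fp);  // load saved caller fp
    caller.sp = f.fp + 2 * sizeof(uintptr_t);  // assumed fp/ra frame layout
    // A caller must sit at a higher address than its callee; anything else
    // means the chain is corrupt (the same check as in AdvanceOneFrame).
    if (caller.sp < f.sp || caller.fp < f.fp) break;
    f = caller;
  }
  return count;
}
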
diff --git a/src/frames.h b/src/frames.h
index f09c24a..9ca218a 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -47,7 +47,7 @@
 
 
 // Forward declarations.
-class StackFrameIterator;
+class StackFrameIteratorBase;
 class ThreadLocalTop;
 class Isolate;
 
@@ -297,6 +297,10 @@
   static void SetReturnAddressLocationResolver(
       ReturnAddressLocationResolver resolver);
 
+  // Resolves pc_address through the resolution address function if one is set.
+  static inline Address* ResolveReturnAddressLocation(Address* pc_address);
+
+
   // Printing support.
   enum PrintMode { OVERVIEW, DETAILS };
   virtual void Print(StringStream* accumulator,
@@ -306,7 +310,7 @@
   Isolate* isolate() const { return isolate_; }
 
  protected:
-  inline explicit StackFrame(StackFrameIterator* iterator);
+  inline explicit StackFrame(StackFrameIteratorBase* iterator);
   virtual ~StackFrame() { }
 
   // Compute the stack pointer for the calling frame.
@@ -321,13 +325,19 @@
   inline StackHandler* top_handler() const;
 
   // Compute the stack frame type for the given state.
-  static Type ComputeType(Isolate* isolate, State* state);
+  static Type ComputeType(const StackFrameIteratorBase* iterator, State* state);
+
+#ifdef DEBUG
+  bool can_access_heap_objects() const;
+#endif
 
  private:
-  const StackFrameIterator* iterator_;
+  const StackFrameIteratorBase* iterator_;
   Isolate* isolate_;
   State state_;
 
+  static ReturnAddressLocationResolver return_address_location_resolver_;
+
   // Fill in the state of the calling frame.
   virtual void ComputeCallerState(State* state) const = 0;
 
@@ -337,6 +347,7 @@
   static const intptr_t kIsolateTag = 1;
 
   friend class StackFrameIterator;
+  friend class StackFrameIteratorBase;
   friend class StackHandlerIterator;
   friend class SafeStackFrameIterator;
 
@@ -362,7 +373,7 @@
   virtual void SetCallerFp(Address caller_fp);
 
  protected:
-  inline explicit EntryFrame(StackFrameIterator* iterator);
+  inline explicit EntryFrame(StackFrameIteratorBase* iterator);
 
   // The caller stack pointer for entry frames is always zero. The
   // real information about the caller frame is available through the
@@ -373,7 +384,7 @@
   virtual void ComputeCallerState(State* state) const;
   virtual Type GetCallerState(State* state) const;
 
-  friend class StackFrameIterator;
+  friend class StackFrameIteratorBase;
 };
 
 
@@ -389,10 +400,10 @@
   }
 
  protected:
-  inline explicit EntryConstructFrame(StackFrameIterator* iterator);
+  inline explicit EntryConstructFrame(StackFrameIteratorBase* iterator);
 
  private:
-  friend class StackFrameIterator;
+  friend class StackFrameIteratorBase;
 };
 
 
@@ -423,14 +434,14 @@
   static void FillState(Address fp, Address sp, State* state);
 
  protected:
-  inline explicit ExitFrame(StackFrameIterator* iterator);
+  inline explicit ExitFrame(StackFrameIteratorBase* iterator);
 
   virtual Address GetCallerStackPointer() const;
 
  private:
   virtual void ComputeCallerState(State* state) const;
 
-  friend class StackFrameIterator;
+  friend class StackFrameIteratorBase;
 };
 
 
@@ -456,7 +467,7 @@
   }
 
  protected:
-  inline explicit StandardFrame(StackFrameIterator* iterator);
+  inline explicit StandardFrame(StackFrameIteratorBase* iterator);
 
   virtual void ComputeCallerState(State* state) const;
 
@@ -493,7 +504,7 @@
 
  private:
   friend class StackFrame;
-  friend class StackFrameIterator;
+  friend class SafeStackFrameIterator;
 };
 
 
@@ -599,7 +610,7 @@
                        bool print_line_number);
 
  protected:
-  inline explicit JavaScriptFrame(StackFrameIterator* iterator);
+  inline explicit JavaScriptFrame(StackFrameIteratorBase* iterator);
 
   virtual Address GetCallerStackPointer() const;
 
@@ -612,7 +623,7 @@
  private:
   inline Object* function_slot_object() const;
 
-  friend class StackFrameIterator;
+  friend class StackFrameIteratorBase;
 };
 
 
@@ -627,13 +638,13 @@
   virtual Code* unchecked_code() const;
 
  protected:
-  inline explicit StubFrame(StackFrameIterator* iterator);
+  inline explicit StubFrame(StackFrameIteratorBase* iterator);
 
   virtual Address GetCallerStackPointer() const;
 
   virtual int GetNumberOfIncomingArguments() const;
 
-  friend class StackFrameIterator;
+  friend class StackFrameIteratorBase;
 };
 
 
@@ -656,12 +667,12 @@
   DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
 
  protected:
-  inline explicit OptimizedFrame(StackFrameIterator* iterator);
+  inline explicit OptimizedFrame(StackFrameIteratorBase* iterator);
 
  private:
   JSFunction* LiteralAt(FixedArray* literal_array, int literal_id);
 
-  friend class StackFrameIterator;
+  friend class StackFrameIteratorBase;
 };
 
 
@@ -686,14 +697,14 @@
                      int index) const;
 
  protected:
-  inline explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator);
+  inline explicit ArgumentsAdaptorFrame(StackFrameIteratorBase* iterator);
 
   virtual int GetNumberOfIncomingArguments() const;
 
   virtual Address GetCallerStackPointer() const;
 
  private:
-  friend class StackFrameIterator;
+  friend class StackFrameIteratorBase;
 };
 
 
@@ -713,12 +724,12 @@
   }
 
  protected:
-  inline explicit InternalFrame(StackFrameIterator* iterator);
+  inline explicit InternalFrame(StackFrameIteratorBase* iterator);
 
   virtual Address GetCallerStackPointer() const;
 
  private:
-  friend class StackFrameIterator;
+  friend class StackFrameIteratorBase;
 };
 
 
@@ -746,12 +757,12 @@
 
  protected:
   inline explicit StubFailureTrampolineFrame(
-      StackFrameIterator* iterator);
+      StackFrameIteratorBase* iterator);
 
   virtual Address GetCallerStackPointer() const;
 
  private:
-  friend class StackFrameIterator;
+  friend class StackFrameIteratorBase;
 };
 
 
@@ -767,50 +778,30 @@
   }
 
  protected:
-  inline explicit ConstructFrame(StackFrameIterator* iterator);
+  inline explicit ConstructFrame(StackFrameIteratorBase* iterator);
 
  private:
-  friend class StackFrameIterator;
+  friend class StackFrameIteratorBase;
 };
 
 
-class StackFrameIterator BASE_EMBEDDED {
+class StackFrameIteratorBase BASE_EMBEDDED {
  public:
-  // An iterator that iterates over the isolate's current thread's stack,
-  explicit StackFrameIterator(Isolate* isolate);
-
-  // An iterator that iterates over a given thread's stack.
-  StackFrameIterator(Isolate* isolate, ThreadLocalTop* t);
-
-  // An iterator that can start from a given FP address.
-  // If use_top, then work as usual, if fp isn't NULL, use it,
-  // otherwise, do nothing.
-  StackFrameIterator(Isolate* isolate, bool use_top, Address fp, Address sp);
-
-  StackFrame* frame() const {
-    ASSERT(!done());
-    return frame_;
-  }
-
   Isolate* isolate() const { return isolate_; }
 
   bool done() const { return frame_ == NULL; }
-  void Advance() { (this->*advance_)(); }
 
-  // Go back to the first frame.
-  void Reset();
+ protected:
+  // An iterator that iterates over a given thread's stack.
+  StackFrameIteratorBase(Isolate* isolate, bool can_access_heap_objects);
 
- private:
   Isolate* isolate_;
 #define DECLARE_SINGLETON(ignore, type) type type##_;
   STACK_FRAME_TYPE_LIST(DECLARE_SINGLETON)
 #undef DECLARE_SINGLETON
   StackFrame* frame_;
   StackHandler* handler_;
-  ThreadLocalTop* thread_;
-  Address fp_;
-  Address sp_;
-  void (StackFrameIterator::*advance_)();
+  const bool can_access_heap_objects_;
 
   StackHandler* handler() const {
     ASSERT(!done());
@@ -822,44 +813,40 @@
   // A helper function; it can return a NULL pointer.
   StackFrame* SingletonFor(StackFrame::Type type);
 
-  void AdvanceWithHandler();
-  void AdvanceWithoutHandler();
-
+ private:
   friend class StackFrame;
-  friend class SafeStackFrameIterator;
+  DISALLOW_COPY_AND_ASSIGN(StackFrameIteratorBase);
+};
+
+
+class StackFrameIterator: public StackFrameIteratorBase {
+ public:
+  // An iterator that iterates over the isolate's current thread's stack.
+  explicit StackFrameIterator(Isolate* isolate);
+  // An iterator that iterates over a given thread's stack.
+  StackFrameIterator(Isolate* isolate, ThreadLocalTop* t);
+
+  StackFrame* frame() const {
+    ASSERT(!done());
+    return frame_;
+  }
+  void Advance();
+
+ private:
+  // Go back to the first frame.
+  void Reset(ThreadLocalTop* top);
+
   DISALLOW_COPY_AND_ASSIGN(StackFrameIterator);
 };
 
 
 // Iterator that supports iterating through all JavaScript frames.
-template<typename Iterator>
-class JavaScriptFrameIteratorTemp BASE_EMBEDDED {
+class JavaScriptFrameIterator BASE_EMBEDDED {
  public:
-  inline explicit JavaScriptFrameIteratorTemp(Isolate* isolate);
-
-  inline JavaScriptFrameIteratorTemp(Isolate* isolate, ThreadLocalTop* top);
-
+  inline explicit JavaScriptFrameIterator(Isolate* isolate);
+  inline JavaScriptFrameIterator(Isolate* isolate, ThreadLocalTop* top);
   // Skip frames until the frame with the given id is reached.
-  explicit JavaScriptFrameIteratorTemp(StackFrame::Id id) { AdvanceToId(id); }
-
-  inline JavaScriptFrameIteratorTemp(Isolate* isolate, StackFrame::Id id);
-
-  JavaScriptFrameIteratorTemp(Address fp,
-                              Address sp,
-                              Address low_bound,
-                              Address high_bound) :
-      iterator_(fp, sp, low_bound, high_bound) {
-    if (!done()) Advance();
-  }
-
-  JavaScriptFrameIteratorTemp(Isolate* isolate,
-                              Address fp,
-                              Address sp,
-                              Address low_bound,
-                              Address high_bound) :
-      iterator_(isolate, fp, sp, low_bound, high_bound) {
-    if (!done()) Advance();
-  }
+  JavaScriptFrameIterator(Isolate* isolate, StackFrame::Id id);
 
   inline JavaScriptFrame* frame() const;
 
@@ -871,26 +858,17 @@
   // arguments.
   void AdvanceToArgumentsFrame();
 
-  // Go back to the first frame.
-  void Reset();
-
  private:
-  inline void AdvanceToId(StackFrame::Id id);
-
-  Iterator iterator_;
+  StackFrameIterator iterator_;
 };
 
 
-typedef JavaScriptFrameIteratorTemp<StackFrameIterator> JavaScriptFrameIterator;
-
-
 // NOTE: The stack trace frame iterator is an iterator that only
 // traverses proper JavaScript frames; that is, JavaScript frames that
 // have proper JavaScript functions. This excludes the problematic
 // functions in runtime.js.
 class StackTraceFrameIterator: public JavaScriptFrameIterator {
  public:
-  StackTraceFrameIterator();
   explicit StackTraceFrameIterator(Isolate* isolate);
   void Advance();
 
@@ -899,95 +877,28 @@
 };
 
 
-class SafeStackFrameIterator BASE_EMBEDDED {
+class SafeStackFrameIterator: public StackFrameIteratorBase {
  public:
   SafeStackFrameIterator(Isolate* isolate,
                          Address fp, Address sp,
                          Address low_bound, Address high_bound);
 
-  StackFrame* frame() const {
-    ASSERT(is_working_iterator_);
-    return iterator_.frame();
-  }
-
-  bool done() const { return iteration_done_ ? true : iterator_.done(); }
-
+  inline JavaScriptFrame* frame() const;
   void Advance();
-  void Reset();
-
-  static bool is_active(Isolate* isolate);
-
-  static bool IsWithinBounds(
-      Address low_bound, Address high_bound, Address addr) {
-    return low_bound <= addr && addr <= high_bound;
-  }
 
  private:
-  class StackAddressValidator {
-   public:
-    StackAddressValidator(Address low_bound, Address high_bound)
-        : low_bound_(low_bound), high_bound_(high_bound) { }
-    bool IsValid(Address addr) const {
-      return IsWithinBounds(low_bound_, high_bound_, addr);
-    }
-   private:
-    Address low_bound_;
-    Address high_bound_;
-  };
-
-  class ExitFrameValidator {
-   public:
-    explicit ExitFrameValidator(const StackAddressValidator& validator)
-        : validator_(validator) { }
-    ExitFrameValidator(Address low_bound, Address high_bound)
-        : validator_(low_bound, high_bound) { }
-    bool IsValidFP(Address fp);
-   private:
-    StackAddressValidator validator_;
-  };
+  void AdvanceOneFrame();
 
   bool IsValidStackAddress(Address addr) const {
-    return stack_validator_.IsValid(addr);
+    return low_bound_ <= addr && addr <= high_bound_;
   }
-  bool CanIterateHandles(StackFrame* frame, StackHandler* handler);
   bool IsValidFrame(StackFrame* frame) const;
   bool IsValidCaller(StackFrame* frame);
-  static bool IsValidTop(Isolate* isolate,
-                         Address low_bound, Address high_bound);
+  bool IsValidExitFrame(Address fp) const;
+  bool IsValidTop(ThreadLocalTop* top) const;
 
-  // This is a nasty hack to make sure the active count is incremented
-  // before the constructor for the embedded iterator is invoked. This
-  // is needed because the constructor will start looking at frames
-  // right away and we need to make sure it doesn't start inspecting
-  // heap objects.
-  class ActiveCountMaintainer BASE_EMBEDDED {
-   public:
-    explicit ActiveCountMaintainer(Isolate* isolate);
-    ~ActiveCountMaintainer();
-   private:
-    Isolate* isolate_;
-  };
-
-  ActiveCountMaintainer maintainer_;
-  StackAddressValidator stack_validator_;
-  const bool is_valid_top_;
-  const bool is_valid_fp_;
-  const bool is_working_iterator_;
-  bool iteration_done_;
-  StackFrameIterator iterator_;
-};
-
-
-typedef JavaScriptFrameIteratorTemp<SafeStackFrameIterator>
-    SafeJavaScriptFrameIterator;
-
-
-class SafeStackTraceFrameIterator: public SafeJavaScriptFrameIterator {
- public:
-  explicit SafeStackTraceFrameIterator(Isolate* isolate,
-                                       Address fp, Address sp,
-                                       Address low_bound, Address high_bound);
-  void Advance();
+  const Address low_bound_;
+  const Address high_bound_;
 };
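
The header restructuring above replaces the old advance_ member-function pointer and the global is_active() counter with a small hierarchy: both iterators share StackFrameIteratorBase, and a per-instance const can_access_heap_objects_ flag tells ComputeType whether heap access is allowed. A toy version of that shape, assuming nothing beyond what the hunks show; the class and method names below are illustrative, not V8's.

#include <cassert>

class IteratorBase {
 public:
  bool done() const { return pos_ < 0; }
  // Queried by code that must not touch the heap during a profiler tick,
  // replacing the old global "is a safe iterator active" counter.
  bool can_access_heap_objects() const { return can_access_heap_objects_; }

 protected:
  IteratorBase(int frames, bool can_access_heap_objects)
      : pos_(frames - 1), can_access_heap_objects_(can_access_heap_objects) {}
  int pos_;
  const bool can_access_heap_objects_;  // fixed per subclass for its lifetime
};

// The full iterator may inspect heap objects while classifying frames.
class FullIterator : public IteratorBase {
 public:
  explicit FullIterator(int frames) : IteratorBase(frames, true) {}
  void Advance() { assert(!done()); --pos_; }
};

// The sampling-safe iterator must never dereference heap pointers.
class SafeIterator : public IteratorBase {
 public:
  explicit SafeIterator(int frames) : IteratorBase(frames, false) {}
  void Advance() { assert(!done()); --pos_; }
};

The design point is that the safety property becomes a fixed attribute of the iterator object instead of mutable global state on the isolate.
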
 
 
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 49a3194..7336261 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -1545,7 +1545,7 @@
 
 void FullCodeGenerator::VisitLiteral(Literal* expr) {
   Comment cmnt(masm_, "[ Literal");
-  context()->Plug(expr->handle());
+  context()->Plug(expr->value());
 }
 
 
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index 5717a96..dee115c 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -1916,8 +1916,7 @@
 }
 
 
-static JITCodeEntry* CreateELFObject(CodeDescription* desc, Zone* zone) {
-  ZoneScope zone_scope(zone, DELETE_ON_EXIT);
+static JITCodeEntry* CreateELFObject(CodeDescription* desc, Isolate* isolate) {
 #ifdef __MACH_O
   MachO mach_o;
   Writer w(&mach_o);
@@ -1930,11 +1929,12 @@
 
   mach_o.Write(&w, desc->CodeStart(), desc->CodeSize());
 #else
-  ELF elf(zone);
+  Zone zone(isolate);
+  ELF elf(&zone);
   Writer w(&elf);
 
   int text_section_index = elf.AddSection(
-      new(zone) FullHeaderELFSection(
+      new(&zone) FullHeaderELFSection(
           ".text",
           ELFSection::TYPE_NOBITS,
           kCodeAlignment,
@@ -1942,11 +1942,11 @@
           0,
           desc->CodeSize(),
           ELFSection::FLAG_ALLOC | ELFSection::FLAG_EXEC),
-      zone);
+      &zone);
 
-  CreateSymbolsTable(desc, zone, &elf, text_section_index);
+  CreateSymbolsTable(desc, &zone, &elf, text_section_index);
 
-  CreateDWARFSections(desc, zone, &elf);
+  CreateDWARFSections(desc, &zone, &elf);
 
   elf.Write(&w);
 #endif
@@ -2083,8 +2083,8 @@
   }
 
   AddUnwindInfo(&code_desc);
-  Zone* zone = code->GetIsolate()->runtime_zone();
-  JITCodeEntry* entry = CreateELFObject(&code_desc, zone);
+  Isolate* isolate = code->GetIsolate();
+  JITCodeEntry* entry = CreateELFObject(&code_desc, isolate);
   ASSERT(!IsLineInfoTagged(entry));
 
   delete lineinfo;
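
The gdb-jit change drops the caller-supplied runtime_zone() plus ZoneScope(DELETE_ON_EXIT) in favor of a Zone constructed on CreateELFObject's own stack, so every allocation dies when the function returns. A generic sketch of that RAII-arena pattern; Arena, Section, and BuildObject are stand-ins, not V8's Zone API.

#include <cstddef>
#include <new>
#include <vector>

// A toy arena: memory lives exactly as long as the Arena object.
class Arena {
 public:
  ~Arena() {
    for (std::size_t i = 0; i < blocks_.size(); ++i) delete[] blocks_[i];
  }
  void* Allocate(std::size_t n) {
    char* block = new char[n];
    blocks_.push_back(block);
    return block;
  }

 private:
  std::vector<char*> blocks_;
};

struct Section {
  int index;  // trivially destructible, so skipping ~Section() is fine
};

void BuildObject() {
  Arena arena;  // local, not shared with the caller
  Section* text = new (arena.Allocate(sizeof(Section))) Section();
  text->index = 1;
  // ... emit sections into the object file ...
}  // arena's destructor frees everything; no explicit scope object needed
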
diff --git a/src/global-handles.h b/src/global-handles.h
index ac26e00..cd75133 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -152,7 +152,7 @@
   int NumberOfGlobalObjectWeakHandles();
 
   // Returns the current number of handles to global objects.
-  int NumberOfGlobalHandles() {
+  int global_handles_count() const {
     return number_of_global_handles_;
   }
 
diff --git a/src/globals.h b/src/globals.h
index 59931bf..573e19a 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -148,6 +148,19 @@
 #endif
 #endif
 
+// Determine architecture endianness (we only support little-endian).
+#if defined(V8_TARGET_ARCH_IA32)
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif defined(V8_TARGET_ARCH_X64)
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif defined(V8_TARGET_ARCH_ARM)
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif defined(V8_TARGET_ARCH_MIPS)
+#define V8_TARGET_LITTLE_ENDIAN 1
+#else
+#error Unknown target architecture endianness
+#endif
+
 // Support for alternative bool type. This is only enabled if the code is
 // compiled with USE_MYBOOL defined. This catches some nasty type bugs.
 // For instance, 'bool b = "false";' results in b == true! This is a hidden
@@ -399,6 +412,18 @@
 };
 
 
+// A simple Maybe type, that can be passed by value.
+template<class T>
+struct Maybe {
+  Maybe() : has_value(false) {}
+  explicit Maybe(T t) : has_value(true), value(t) {}
+  Maybe(bool has, T t) : has_value(has), value(t) {}
+
+  bool has_value;
+  T value;
+};
+
+
 // The Strict Mode (ECMA-262 5th edition, 4.2.2).
 //
 // This flag is used in the backend to represent the language mode. So far
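
The Maybe template added above is deliberately minimal: a flag plus a value, cheap to pass and return by value, so callers can distinguish "absent" from any legitimate T. A hedged usage sketch; ParseDigit is invented for illustration.

// Same shape as the struct added to globals.h.
template<class T>
struct Maybe {
  Maybe() : has_value(false) {}
  explicit Maybe(T t) : has_value(true), value(t) {}
  Maybe(bool has, T t) : has_value(has), value(t) {}

  bool has_value;
  T value;
};

// Hypothetical helper: distinguishes "no digit here" from "the digit 0".
static Maybe<int> ParseDigit(char c) {
  if (c < '0' || c > '9') return Maybe<int>();  // absent
  return Maybe<int>(c - '0');                   // present
}

// Callers must test has_value before touching value:
//   Maybe<int> d = ParseDigit('7');
//   if (d.has_value) { /* use d.value */ }
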
diff --git a/src/handles.cc b/src/handles.cc
index 6859cd4..382b3fb 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -557,11 +557,7 @@
     v8::NamedPropertyEnumerator enum_fun =
         v8::ToCData<v8::NamedPropertyEnumerator>(interceptor->enumerator());
     LOG(isolate, ApiObjectAccess("interceptor-named-enum", *object));
-    {
-      // Leaving JavaScript.
-      VMState<EXTERNAL> state(isolate);
-      result = args.Call(enum_fun);
-    }
+    result = args.Call(enum_fun);
   }
 #if ENABLE_EXTRA_CHECKS
   CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
@@ -583,14 +579,10 @@
     v8::IndexedPropertyEnumerator enum_fun =
         v8::ToCData<v8::IndexedPropertyEnumerator>(interceptor->enumerator());
     LOG(isolate, ApiObjectAccess("interceptor-indexed-enum", *object));
-    {
-      // Leaving JavaScript.
-      VMState<EXTERNAL> state(isolate);
-      result = args.Call(enum_fun);
+    result = args.Call(enum_fun);
 #if ENABLE_EXTRA_CHECKS
-      CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
+    CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
 #endif
-    }
   }
   return v8::Local<v8::Array>::New(reinterpret_cast<v8::Isolate*>(isolate),
                                    result);
diff --git a/src/heap-inl.h b/src/heap-inl.h
index eb9d9c0..92ae8e5 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -550,7 +550,7 @@
     if (amount >= 0) {
       amount_of_external_allocated_memory_ = amount;
     } else {
-      // Give up and reset the counters in case of an overflow.
+      // Give up and reset the counters in case of an underflow.
       amount_of_external_allocated_memory_ = 0;
       amount_of_external_allocated_memory_at_last_global_gc_ = 0;
     }
@@ -558,8 +558,11 @@
   if (FLAG_trace_external_memory) {
     PrintPID("%8.0f ms: ", isolate()->time_millis_since_init());
     PrintF("Adjust amount of external memory: delta=%6" V8_PTR_PREFIX "d KB, "
-           " amount=%6" V8_PTR_PREFIX "d KB, isolate=0x%08" V8PRIxPTR ".\n",
-           change_in_bytes / 1024, amount_of_external_allocated_memory_ / 1024,
+           "amount=%6" V8_PTR_PREFIX "d KB, since_gc=%6" V8_PTR_PREFIX "d KB, "
+           "isolate=0x%08" V8PRIxPTR ".\n",
+           change_in_bytes / KB,
+           amount_of_external_allocated_memory_ / KB,
+           PromotedExternalMemorySize() / KB,
            reinterpret_cast<intptr_t>(isolate()));
   }
   ASSERT(amount_of_external_allocated_memory_ >= 0);
@@ -567,11 +570,6 @@
 }
 
 
-void Heap::SetLastScriptId(Object* last_script_id) {
-  roots_[kLastScriptIdRootIndex] = last_script_id;
-}
-
-
 Isolate* Heap::isolate() {
   return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) -
       reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
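
On the heap-inl.h hunk: a negative running total means the embedder reported more external frees than allocations (an underflow, as the corrected comment says), and the response is to reset both counters rather than carry a negative number forward. A standalone sketch of that bookkeeping, with invented names.

#include <cstdint>

// Running totals of embedder-reported external memory, in bytes.
static int64_t g_external_bytes = 0;
static int64_t g_external_bytes_at_last_gc = 0;

int64_t AdjustExternalMemory(int64_t change_in_bytes) {
  int64_t amount = g_external_bytes + change_in_bytes;
  if (amount >= 0) {
    g_external_bytes = amount;
  } else {
    // Underflow: more frees than allocations were reported. Give up and
    // reset both counters, as the patch's comment describes.
    g_external_bytes = 0;
    g_external_bytes_at_last_gc = 0;
  }
  return g_external_bytes;
}
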
diff --git a/src/heap-snapshot-generator.cc b/src/heap-snapshot-generator.cc
index 217d1ca..dd8896e 100644
--- a/src/heap-snapshot-generator.cc
+++ b/src/heap-snapshot-generator.cc
@@ -961,7 +961,7 @@
 
   bool extract_indexed_refs = true;
   if (obj->IsJSGlobalProxy()) {
-    ExtractJSGlobalProxyReferences(JSGlobalProxy::cast(obj));
+    ExtractJSGlobalProxyReferences(entry, JSGlobalProxy::cast(obj));
   } else if (obj->IsJSObject()) {
     ExtractJSObjectReferences(entry, JSObject::cast(obj));
   } else if (obj->IsString()) {
@@ -974,6 +974,8 @@
     ExtractSharedFunctionInfoReferences(entry, SharedFunctionInfo::cast(obj));
   } else if (obj->IsScript()) {
     ExtractScriptReferences(entry, Script::cast(obj));
+  } else if (obj->IsAccessorPair()) {
+    ExtractAccessorPairReferences(entry, AccessorPair::cast(obj));
   } else if (obj->IsCodeCache()) {
     ExtractCodeCacheReferences(entry, CodeCache::cast(obj));
   } else if (obj->IsCode()) {
@@ -994,19 +996,11 @@
 }
 
 
-void V8HeapExplorer::ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy) {
-  // We need to reference JS global objects from snapshot's root.
-  // We use JSGlobalProxy because this is what embedder (e.g. browser)
-  // uses for the global object.
-  Object* object = proxy->map()->prototype();
-  bool is_debug_object = false;
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  is_debug_object = object->IsGlobalObject() &&
-      Isolate::Current()->debug()->IsDebugGlobal(GlobalObject::cast(object));
-#endif
-  if (!is_debug_object) {
-    SetUserGlobalReference(object);
-  }
+void V8HeapExplorer::ExtractJSGlobalProxyReferences(
+    int entry, JSGlobalProxy* proxy) {
+  SetInternalReference(proxy, entry,
+                       "native_context", proxy->native_context(),
+                       JSGlobalProxy::kNativeContextOffset);
 }
 
 
@@ -1048,9 +1042,9 @@
     SetInternalReference(js_fun, entry,
                          "shared", shared_info,
                          JSFunction::kSharedFunctionInfoOffset);
-    TagObject(js_fun->unchecked_context(), "(context)");
+    TagObject(js_fun->context(), "(context)");
     SetInternalReference(js_fun, entry,
-                         "context", js_fun->unchecked_context(),
+                         "context", js_fun->context(),
                          JSFunction::kContextOffset);
     for (int i = JSFunction::kNonWeakFieldsEndOffset;
          i < JSFunction::kSize;
@@ -1242,6 +1236,15 @@
 }
 
 
+void V8HeapExplorer::ExtractAccessorPairReferences(
+    int entry, AccessorPair* accessors) {
+  SetInternalReference(accessors, entry, "getter", accessors->getter(),
+                       AccessorPair::kGetterOffset);
+  SetInternalReference(accessors, entry, "setter", accessors->setter(),
+                       AccessorPair::kSetterOffset);
+}
+
+
 void V8HeapExplorer::ExtractCodeCacheReferences(
     int entry, CodeCache* code_cache) {
   TagObject(code_cache->default_cache(), "(default code cache)");
@@ -1353,21 +1356,11 @@
               js_obj, entry,
               descs->GetKey(i), descs->GetConstantFunction(i));
           break;
-        case CALLBACKS: {
-          Object* callback_obj = descs->GetValue(i);
-          if (callback_obj->IsAccessorPair()) {
-            AccessorPair* accessors = AccessorPair::cast(callback_obj);
-            if (Object* getter = accessors->getter()) {
-              SetPropertyReference(js_obj, entry, descs->GetKey(i),
-                                   getter, "get-%s");
-            }
-            if (Object* setter = accessors->setter()) {
-              SetPropertyReference(js_obj, entry, descs->GetKey(i),
-                                   setter, "set-%s");
-            }
-          }
+        case CALLBACKS:
+          ExtractAccessorPairProperty(
+              js_obj, entry,
+              descs->GetKey(i), descs->GetValue(i));
           break;
-        }
         case NORMAL:  // only in slow mode
         case HANDLER:  // only in lookup results, not in descriptors
         case INTERCEPTOR:  // only in lookup results, not in descriptors
@@ -1389,18 +1382,35 @@
         Object* value = target->IsPropertyCell()
             ? PropertyCell::cast(target)->value()
             : target;
-        if (k != heap_->hidden_string()) {
-          SetPropertyReference(js_obj, entry, String::cast(k), value);
-        } else {
+        if (k == heap_->hidden_string()) {
           TagObject(value, "(hidden properties)");
           SetInternalReference(js_obj, entry, "hidden_properties", value);
+          continue;
         }
+        if (ExtractAccessorPairProperty(js_obj, entry, k, value)) continue;
+        SetPropertyReference(js_obj, entry, String::cast(k), value);
       }
     }
   }
 }
 
 
+bool V8HeapExplorer::ExtractAccessorPairProperty(
+    JSObject* js_obj, int entry, Object* key, Object* callback_obj) {
+  if (!callback_obj->IsAccessorPair()) return false;
+  AccessorPair* accessors = AccessorPair::cast(callback_obj);
+  Object* getter = accessors->getter();
+  if (!getter->IsOddball()) {
+    SetPropertyReference(js_obj, entry, String::cast(key), getter, "get %s");
+  }
+  Object* setter = accessors->setter();
+  if (!setter->IsOddball()) {
+    SetPropertyReference(js_obj, entry, String::cast(key), setter, "set %s");
+  }
+  return true;
+}
+
+
 void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
   if (js_obj->HasFastObjectElements()) {
     FixedArray* elements = FixedArray::cast(js_obj->elements());
@@ -1760,6 +1770,22 @@
           snapshot_->gc_subroot(tag)->index(),
           child_entry);
     }
+
+    // Add a shortcut to the JS global object reference at snapshot root.
+    if (child_obj->IsNativeContext()) {
+      Context* context = Context::cast(child_obj);
+      GlobalObject* global = context->global_object();
+      if (global->IsJSGlobalObject()) {
+        bool is_debug_object = false;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+        is_debug_object = heap_->isolate()->debug()->IsDebugGlobal(global);
+#endif
+        if (!is_debug_object && !user_roots_.Contains(global)) {
+          user_roots_.Insert(global);
+          SetUserGlobalReference(global);
+        }
+      }
+    }
   }
 }
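
ExtractAccessorPairProperty above folds the duplicated getter/setter handling into one helper: it bails out early for non-accessor values, skips oddball (hole/undefined) slots instead of null-checking raw pointers, and returns true so the caller can skip the plain-property path. A simplified standalone version; the Value and Property types are stand-ins.

#include <cstdio>

struct Value {
  bool is_oddball;   // stands in for V8's the-hole/undefined sentinels
  const char* name;
};

struct AccessorPair {
  Value getter;
  Value setter;
};

struct Property {
  bool is_accessor_pair;
  AccessorPair pair;
};

// Returns true iff the property was an accessor pair and has been handled,
// so the caller falls through to SetPropertyReference only for plain values.
bool ExtractAccessorPairProperty(const char* key, const Property& p) {
  if (!p.is_accessor_pair) return false;
  if (!p.pair.getter.is_oddball)
    std::printf("get %s -> %s\n", key, p.pair.getter.name);
  if (!p.pair.setter.is_oddball)
    std::printf("set %s -> %s\n", key, p.pair.setter.name);
  return true;
}
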
 
diff --git a/src/heap-snapshot-generator.h b/src/heap-snapshot-generator.h
index c9a82f0..d3c8f1b 100644
--- a/src/heap-snapshot-generator.h
+++ b/src/heap-snapshot-generator.h
@@ -454,7 +454,7 @@
   const char* GetSystemEntryName(HeapObject* object);
 
   void ExtractReferences(HeapObject* obj);
-  void ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy);
+  void ExtractJSGlobalProxyReferences(int entry, JSGlobalProxy* proxy);
   void ExtractJSObjectReferences(int entry, JSObject* js_obj);
   void ExtractStringReferences(int entry, String* obj);
   void ExtractContextReferences(int entry, Context* context);
@@ -462,12 +462,15 @@
   void ExtractSharedFunctionInfoReferences(int entry,
                                            SharedFunctionInfo* shared);
   void ExtractScriptReferences(int entry, Script* script);
+  void ExtractAccessorPairReferences(int entry, AccessorPair* accessors);
   void ExtractCodeCacheReferences(int entry, CodeCache* code_cache);
   void ExtractCodeReferences(int entry, Code* code);
   void ExtractCellReferences(int entry, Cell* cell);
   void ExtractPropertyCellReferences(int entry, PropertyCell* cell);
   void ExtractClosureReferences(JSObject* js_obj, int entry);
   void ExtractPropertyReferences(JSObject* js_obj, int entry);
+  bool ExtractAccessorPairProperty(JSObject* js_obj, int entry,
+                                   Object* key, Object* callback_obj);
   void ExtractElementReferences(JSObject* js_obj, int entry);
   void ExtractInternalReferences(JSObject* js_obj, int entry);
   bool IsEssentialObject(Object* object);
@@ -529,6 +532,7 @@
   SnapshotFillerInterface* filler_;
   HeapObjectsSet objects_tags_;
   HeapObjectsSet strong_gc_subroot_names_;
+  HeapObjectsSet user_roots_;
   v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_;
 
   static HeapObject* const kGcRootsObject;
diff --git a/src/heap.cc b/src/heap.cc
index 43c83e4..6196228 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1568,6 +1568,8 @@
       // tail is a live object, visit it.
       WeakListVisitor<T>::VisitLiveObject(
           heap, tail, retainer, record_slots);
+    } else {
+      WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
     }
 
     // Move to next element in the list.
@@ -1599,6 +1601,9 @@
   static void VisitLiveObject(Heap*, JSFunction*,
                               WeakObjectRetainer*, bool) {
   }
+
+  static void VisitPhantomObject(Heap*, JSFunction*) {
+  }
 };
 
 
@@ -1637,6 +1642,9 @@
     }
   }
 
+  static void VisitPhantomObject(Heap*, Context*) {
+  }
+
   static int WeakNextOffset() {
     return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
   }
@@ -1666,22 +1674,24 @@
 
 
 template<>
-struct WeakListVisitor<JSTypedArray> {
-  static void SetWeakNext(JSTypedArray* obj, Object* next) {
+struct WeakListVisitor<JSArrayBufferView> {
+  static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
     obj->set_weak_next(next);
   }
 
-  static Object* WeakNext(JSTypedArray* obj) {
+  static Object* WeakNext(JSArrayBufferView* obj) {
     return obj->weak_next();
   }
 
   static void VisitLiveObject(Heap*,
-                              JSTypedArray* obj,
+                              JSArrayBufferView* obj,
                               WeakObjectRetainer* retainer,
                               bool record_slots) {}
 
+  static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
+
   static int WeakNextOffset() {
-    return JSTypedArray::kWeakNextOffset;
+    return JSArrayBufferView::kWeakNextOffset;
   }
 };
 
@@ -1701,18 +1711,22 @@
                               WeakObjectRetainer* retainer,
                               bool record_slots) {
     Object* typed_array_obj =
-        VisitWeakList<JSTypedArray>(
+        VisitWeakList<JSArrayBufferView>(
             heap,
-            array_buffer->weak_first_array(),
+            array_buffer->weak_first_view(),
             retainer, record_slots);
-    array_buffer->set_weak_first_array(typed_array_obj);
+    array_buffer->set_weak_first_view(typed_array_obj);
     if (typed_array_obj != heap->undefined_value() && record_slots) {
       Object** slot = HeapObject::RawField(
-          array_buffer, JSArrayBuffer::kWeakFirstArrayOffset);
+          array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
       heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
     }
   }
 
+  static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
+    Runtime::FreeArrayBuffer(heap->isolate(), phantom);
+  }
+
   static int WeakNextOffset() {
     return JSArrayBuffer::kWeakNextOffset;
   }
@@ -1729,6 +1743,17 @@
 }
 
 
+void Heap::TearDownArrayBuffers() {
+  Object* undefined = undefined_value();
+  for (Object* o = array_buffers_list(); o != undefined;) {
+    JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
+    Runtime::FreeArrayBuffer(isolate(), buffer);
+    o = buffer->weak_next();
+  }
+  array_buffers_list_ = undefined;
+}
+
+
 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
   DisallowHeapAllocation no_allocation;
 
@@ -1912,6 +1937,10 @@
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
                     Visit);
 
+    table_.Register(kVisitJSDataView,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
+                    Visit);
+
     table_.Register(kVisitJSRegExp,
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
                     Visit);
@@ -2830,8 +2859,11 @@
   }
   HeapObject::cast(result)->set_map_no_write_barrier(
       global_property_cell_map());
-  PropertyCell::cast(result)->set_value(value);
-  PropertyCell::cast(result)->set_type(Type::None());
+  PropertyCell* cell = PropertyCell::cast(result);
+  cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
+                           SKIP_WRITE_BARRIER);
+  cell->set_value(value);
+  cell->set_type(Type::None());
   return result;
 }
 
@@ -3126,7 +3158,7 @@
   set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
 
   // Handling of script id generation is in Factory::NewScript.
-  set_last_script_id(undefined_value());
+  set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
 
   // Initialize keyed lookup cache.
   isolate_->keyed_lookup_cache()->Clear();
@@ -4396,7 +4428,10 @@
   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
 
   // Allocate the backing storage for the properties.
-  int prop_size = map->InitialPropertiesLength();
+  int prop_size =
+      map->pre_allocated_property_fields() +
+      map->unused_property_fields() -
+      map->inobject_properties();
   ASSERT(prop_size >= 0);
   Object* properties;
   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
@@ -4433,7 +4468,10 @@
   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
 
   // Allocate the backing storage for the properties.
-  int prop_size = map->InitialPropertiesLength();
+  int prop_size =
+      map->pre_allocated_property_fields() +
+      map->unused_property_fields() -
+      map->inobject_properties();
   ASSERT(prop_size >= 0);
   Object* properties;
   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
@@ -6620,7 +6658,12 @@
   max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
   reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
-  external_allocation_limit_ = 16 * max_semispace_size_;
+
+  // The external allocation limit should be below 256 MB on all architectures
+  // to avoid unnecessary low memory notifications, as that is the threshold
+  // for some embedders.
+  external_allocation_limit_ = 12 * max_semispace_size_;
+  ASSERT(external_allocation_limit_ <= 256 * MB);
 
   // The old generation is paged and needs at least one page for each space.
   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
@@ -6869,6 +6912,8 @@
     PrintF("\n\n");
   }
 
+  TearDownArrayBuffers();
+
   isolate_->global_handles()->TearDown();
 
   external_string_table_.TearDown();
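
The heap.cc hunks thread a new VisitPhantomObject hook through the generic weak-list walker, so entries that died in a GC cycle get a callback (used here to free JSArrayBuffer backing stores), and TearDownArrayBuffers applies the same cleanup when the heap goes away. A compact sketch of the walker's shape; the Buffer type and direct field access are simplified stand-ins for the WeakListVisitor abstraction.

#include <cstdio>

struct Buffer {
  Buffer* weak_next;
  bool alive;
  int id;
};

template<class T> struct WeakListVisitor;  // specialized per list type

template<> struct WeakListVisitor<Buffer> {
  static Buffer* WeakNext(Buffer* b) { return b->weak_next; }
  static void VisitLiveObject(Buffer*) {}
  static void VisitPhantomObject(Buffer* b) {
    std::printf("releasing backing store of buffer %d\n", b->id);
  }
};

// Rebuilds the list from its live entries; dead ones get the phantom hook.
template<class T>
T* VisitWeakList(T* head) {
  T* new_head = nullptr;
  T** tail = &new_head;
  for (T* candidate = head; candidate != nullptr;) {
    T* next = WeakListVisitor<T>::WeakNext(candidate);
    if (candidate->alive) {
      *tail = candidate;  // retain: splice into the rebuilt list
      tail = &candidate->weak_next;
      WeakListVisitor<T>::VisitLiveObject(candidate);
    } else {
      // New in this patch: dead entries are visited too, so external
      // resources can be released promptly instead of leaking.
      WeakListVisitor<T>::VisitPhantomObject(candidate);
    }
    candidate = next;
  }
  *tail = nullptr;
  return new_head;
}
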
diff --git a/src/heap.h b/src/heap.h
index 78d9093..b3a4205 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -175,7 +175,7 @@
   V(Code, js_entry_code, JsEntryCode)                                          \
   V(Code, js_construct_entry_code, JsConstructEntryCode)                       \
   V(FixedArray, natives_source_cache, NativesSourceCache)                      \
-  V(Object, last_script_id, LastScriptId)                                      \
+  V(Smi, last_script_id, LastScriptId)                                         \
   V(Script, empty_script, EmptyScript)                                         \
   V(Smi, real_stack_limit, RealStackLimit)                                     \
   V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames)        \
@@ -1440,9 +1440,6 @@
     roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
   }
 
-  // Update the next script id.
-  inline void SetLastScriptId(Object* last_script_id);
-
   // Generated code can embed this address to get access to the roots.
   Object** roots_array_start() { return roots_; }
 
@@ -1873,7 +1870,7 @@
   enum {
     FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
     FIRST_FIXED_ARRAY_SUB_TYPE =
-        FIRST_CODE_KIND_SUB_TYPE + Code::LAST_CODE_KIND + 1,
+        FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
     OBJECT_STATS_COUNT =
         FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1
   };
@@ -1885,7 +1882,7 @@
       object_sizes_[type] += size;
     } else {
       if (type == CODE_TYPE) {
-        ASSERT(sub_type <= Code::LAST_CODE_KIND);
+        ASSERT(sub_type < Code::NUMBER_OF_KINDS);
         object_counts_[FIRST_CODE_KIND_SUB_TYPE + sub_type]++;
         object_sizes_[FIRST_CODE_KIND_SUB_TYPE + sub_type] += size;
       } else if (type == FIXED_ARRAY_TYPE) {
@@ -2192,6 +2189,9 @@
   void ProcessNativeContexts(WeakObjectRetainer* retainer, bool record_slots);
   void ProcessArrayBuffers(WeakObjectRetainer* retainer, bool record_slots);
 
+  // Called on heap tear-down.
+  void TearDownArrayBuffers();
+
   // Record statistics before and after garbage collection.
   void ReportStatisticsBeforeGC();
   void ReportStatisticsAfterGC();
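
The statistics change in heap.h swaps LAST_CODE_KIND + 1 arithmetic for an explicit NUMBER_OF_KINDS enumerator, the usual trick for keeping array sizes and range checks in sync with an enum. A minimal illustration with invented names.

enum Kind {
  FUNCTION,
  OPTIMIZED_FUNCTION,
  STUB,
  NUMBER_OF_KINDS  // always last: equals the count of real kinds above
};

static int counts[NUMBER_OF_KINDS];  // sized by the sentinel, never drifts

void Record(Kind k) {
  // Half-open range check reads naturally with a count sentinel
  // (compare ASSERT(sub_type < Code::NUMBER_OF_KINDS) in the hunk above).
  if (k < NUMBER_OF_KINDS) ++counts[k];
}
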
diff --git a/src/hydrogen-environment-liveness.cc b/src/hydrogen-environment-liveness.cc
index 8c66059..20e680c 100644
--- a/src/hydrogen-environment-liveness.cc
+++ b/src/hydrogen-environment-liveness.cc
@@ -33,65 +33,56 @@
 namespace internal {
 
 
-EnvironmentSlotLivenessAnalyzer::EnvironmentSlotLivenessAnalyzer(
+HEnvironmentLivenessAnalysisPhase::HEnvironmentLivenessAnalysisPhase(
     HGraph* graph)
-    : graph_(graph),
-      zone_(graph->isolate()),
-      zone_scope_(&zone_, DELETE_ON_EXIT),
+    : HPhase("H_Environment liveness analysis", graph),
       block_count_(graph->blocks()->length()),
       maximum_environment_size_(graph->maximum_environment_size()),
+      live_at_block_start_(block_count_, zone()),
+      first_simulate_(block_count_, zone()),
+      first_simulate_invalid_for_index_(block_count_, zone()),
+      markers_(maximum_environment_size_, zone()),
       collect_markers_(true),
-      last_simulate_(NULL) {
-  if (maximum_environment_size_ == 0) return;
-
-  live_at_block_start_ =
-      new(zone()) ZoneList<BitVector*>(block_count_, zone());
-  first_simulate_ = new(zone()) ZoneList<HSimulate*>(block_count_, zone());
-  first_simulate_invalid_for_index_ =
-      new(zone()) ZoneList<BitVector*>(block_count_, zone());
-  markers_ = new(zone())
-      ZoneList<HEnvironmentMarker*>(maximum_environment_size_, zone());
-  went_live_since_last_simulate_ =
-      new(zone()) BitVector(maximum_environment_size_, zone());
-
+      last_simulate_(NULL),
+      went_live_since_last_simulate_(maximum_environment_size_, zone()) {
+  ASSERT(maximum_environment_size_ > 0);
   for (int i = 0; i < block_count_; ++i) {
-    live_at_block_start_->Add(
+    live_at_block_start_.Add(
         new(zone()) BitVector(maximum_environment_size_, zone()), zone());
-    first_simulate_->Add(NULL, zone());
-    first_simulate_invalid_for_index_->Add(
+    first_simulate_.Add(NULL, zone());
+    first_simulate_invalid_for_index_.Add(
         new(zone()) BitVector(maximum_environment_size_, zone()), zone());
   }
 }
 
 
-void EnvironmentSlotLivenessAnalyzer::ZapEnvironmentSlot(int index,
-                                                         HSimulate* simulate) {
+void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlot(
+    int index, HSimulate* simulate) {
   int operand_index = simulate->ToOperandIndex(index);
   if (operand_index == -1) {
-    simulate->AddAssignedValue(index, graph_->GetConstantUndefined());
+    simulate->AddAssignedValue(index, graph()->GetConstantUndefined());
   } else {
-    simulate->SetOperandAt(operand_index, graph_->GetConstantUndefined());
+    simulate->SetOperandAt(operand_index, graph()->GetConstantUndefined());
   }
 }
 
 
-void EnvironmentSlotLivenessAnalyzer::ZapEnvironmentSlotsInSuccessors(
-    HBasicBlock* block,
-    BitVector* live) {
+void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlotsInSuccessors(
+    HBasicBlock* block, BitVector* live) {
   // When a value is live in successor A but dead in B, we must
   // explicitly zap it in B.
   for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
     HBasicBlock* successor = it.Current();
     int successor_id = successor->block_id();
-    BitVector* live_in_successor = live_at_block_start_->at(successor_id);
+    BitVector* live_in_successor = live_at_block_start_[successor_id];
     if (live_in_successor->Equals(*live)) continue;
     for (int i = 0; i < live->length(); ++i) {
       if (!live->Contains(i)) continue;
       if (live_in_successor->Contains(i)) continue;
-      if (first_simulate_invalid_for_index_->at(successor_id)->Contains(i)) {
+      if (first_simulate_invalid_for_index_.at(successor_id)->Contains(i)) {
         continue;
       }
-      HSimulate* simulate = first_simulate_->at(successor_id);
+      HSimulate* simulate = first_simulate_.at(successor_id);
       if (simulate == NULL) continue;
       ASSERT(simulate->closure().is_identical_to(
                  block->last_environment()->closure()));
@@ -101,7 +92,7 @@
 }
 
 
-void EnvironmentSlotLivenessAnalyzer::ZapEnvironmentSlotsForInstruction(
+void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlotsForInstruction(
     HEnvironmentMarker* marker) {
   if (!marker->CheckFlag(HValue::kEndsLiveRange)) return;
   HSimulate* simulate = marker->next_simulate();
@@ -112,18 +103,18 @@
 }
 
 
-void EnvironmentSlotLivenessAnalyzer::UpdateLivenessAtBlockEnd(
+void HEnvironmentLivenessAnalysisPhase::UpdateLivenessAtBlockEnd(
     HBasicBlock* block,
     BitVector* live) {
   // Liveness at the end of each block: union of liveness in successors.
   live->Clear();
   for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
-    live->Union(*live_at_block_start_->at(it.Current()->block_id()));
+    live->Union(*live_at_block_start_[it.Current()->block_id()]);
   }
 }
 
 
-void EnvironmentSlotLivenessAnalyzer::UpdateLivenessAtInstruction(
+void HEnvironmentLivenessAnalysisPhase::UpdateLivenessAtInstruction(
     HInstruction* instr,
     BitVector* live) {
   switch (instr->opcode()) {
@@ -135,7 +126,7 @@
       } else {
         marker->ClearFlag(HValue::kEndsLiveRange);
       }
-      if (!went_live_since_last_simulate_->Contains(index)) {
+      if (!went_live_since_last_simulate_.Contains(index)) {
         marker->set_next_simulate(last_simulate_);
       }
       if (marker->kind() == HEnvironmentMarker::LOOKUP) {
@@ -143,11 +134,11 @@
       } else {
         ASSERT(marker->kind() == HEnvironmentMarker::BIND);
         live->Remove(index);
-        went_live_since_last_simulate_->Add(index);
+        went_live_since_last_simulate_.Add(index);
       }
       if (collect_markers_) {
         // Populate |markers_| list during the first pass.
-        markers_->Add(marker, &zone_);
+        markers_.Add(marker, zone());
       }
       break;
     }
@@ -174,8 +165,8 @@
         int return_id = enter->return_targets()->at(i)->block_id();
         // When an AbnormalExit is involved, it can happen that the return
         // target block doesn't actually exist.
-        if (return_id < live_at_block_start_->length()) {
-          live->Union(*live_at_block_start_->at(return_id));
+        if (return_id < live_at_block_start_.length()) {
+          live->Union(*live_at_block_start_[return_id]);
         }
       }
       last_simulate_ = NULL;
@@ -192,7 +183,7 @@
     }
     case HValue::kSimulate:
       last_simulate_ = HSimulate::cast(instr);
-      went_live_since_last_simulate_->Clear();
+      went_live_since_last_simulate_.Clear();
       break;
     default:
       break;
@@ -200,47 +191,46 @@
 }
 
 
-void EnvironmentSlotLivenessAnalyzer::AnalyzeAndTrim() {
-  HPhase phase("H_EnvironmentLivenessAnalysis", graph_);
-  if (maximum_environment_size_ == 0) return;
+void HEnvironmentLivenessAnalysisPhase::Run() {
+  ASSERT(maximum_environment_size_ > 0);
 
   // Main iteration. Compute liveness of environment slots, and store it
   // for each block until it doesn't change any more. For efficiency, visit
   // blocks in reverse order and walk backwards through each block. We
   // need several iterations to propagate liveness through nested loops.
-  BitVector* live = new(zone()) BitVector(maximum_environment_size_, zone());
-  BitVector* worklist = new(zone()) BitVector(block_count_, zone());
+  BitVector live(maximum_environment_size_, zone());
+  BitVector worklist(block_count_, zone());
   for (int i = 0; i < block_count_; ++i) {
-    worklist->Add(i);
+    worklist.Add(i);
   }
-  while (!worklist->IsEmpty()) {
+  while (!worklist.IsEmpty()) {
     for (int block_id = block_count_ - 1; block_id >= 0; --block_id) {
-      if (!worklist->Contains(block_id)) {
+      if (!worklist.Contains(block_id)) {
         continue;
       }
-      worklist->Remove(block_id);
+      worklist.Remove(block_id);
       last_simulate_ = NULL;
 
-      HBasicBlock* block = graph_->blocks()->at(block_id);
-      UpdateLivenessAtBlockEnd(block, live);
+      HBasicBlock* block = graph()->blocks()->at(block_id);
+      UpdateLivenessAtBlockEnd(block, &live);
 
       for (HInstruction* instr = block->last(); instr != NULL;
            instr = instr->previous()) {
-        UpdateLivenessAtInstruction(instr, live);
+        UpdateLivenessAtInstruction(instr, &live);
       }
 
       // Reached the start of the block, do necessary bookkeeping:
       // store computed information for this block and add predecessors
       // to the work list as necessary.
-      first_simulate_->Set(block_id, last_simulate_);
-      first_simulate_invalid_for_index_->at(block_id)->CopyFrom(
-          *went_live_since_last_simulate_);
-      if (live_at_block_start_->at(block_id)->UnionIsChanged(*live)) {
+      first_simulate_.Set(block_id, last_simulate_);
+      first_simulate_invalid_for_index_[block_id]->CopyFrom(
+          went_live_since_last_simulate_);
+      if (live_at_block_start_[block_id]->UnionIsChanged(live)) {
         for (int i = 0; i < block->predecessors()->length(); ++i) {
-          worklist->Add(block->predecessors()->at(i)->block_id());
+          worklist.Add(block->predecessors()->at(i)->block_id());
         }
         if (block->IsInlineReturnTarget()) {
-          worklist->Add(block->inlined_entry_block()->block_id());
+          worklist.Add(block->inlined_entry_block()->block_id());
         }
       }
     }
@@ -249,18 +239,18 @@
   }
 
   // Analysis finished. Zap dead environment slots.
-  for (int i = 0; i < markers_->length(); ++i) {
-    ZapEnvironmentSlotsForInstruction(markers_->at(i));
+  for (int i = 0; i < markers_.length(); ++i) {
+    ZapEnvironmentSlotsForInstruction(markers_[i]);
   }
   for (int block_id = block_count_ - 1; block_id >= 0; --block_id) {
-    HBasicBlock* block = graph_->blocks()->at(block_id);
-    UpdateLivenessAtBlockEnd(block, live);
-    ZapEnvironmentSlotsInSuccessors(block, live);
+    HBasicBlock* block = graph()->blocks()->at(block_id);
+    UpdateLivenessAtBlockEnd(block, &live);
+    ZapEnvironmentSlotsInSuccessors(block, &live);
   }
 
   // Finally, remove the HEnvironment{Bind,Lookup} markers.
-  for (int i = 0; i < markers_->length(); ++i) {
-    markers_->at(i)->DeleteAndReplaceWith(NULL);
+  for (int i = 0; i < markers_.length(); ++i) {
+    markers_[i]->DeleteAndReplaceWith(NULL);
   }
 }
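
The rewritten Run() above is a textbook backward dataflow fixed point: seed a worklist with every block, sweep blocks in reverse id order, and re-queue predecessors whenever a block's live-in set grows. A self-contained sketch over plain bitsets; the CFG encoding and the gen/kill naming are invented, not V8's.

#include <vector>

// live_in[b] only ever grows, so the sweep terminates at a fixed point.
void ComputeLiveness(const std::vector<std::vector<int> >& succ,
                     const std::vector<std::vector<int> >& pred,
                     const std::vector<unsigned>& gen,   // slots read first
                     const std::vector<unsigned>& kill,  // slots written first
                     std::vector<unsigned>* live_in) {
  const int n = static_cast<int>(succ.size());
  live_in->assign(n, 0u);
  std::vector<bool> on_worklist(n, true);
  bool any_queued = true;
  while (any_queued) {
    any_queued = false;
    for (int b = n - 1; b >= 0; --b) {  // reverse order, like the patch
      if (!on_worklist[b]) continue;
      on_worklist[b] = false;
      unsigned live = 0;
      for (int s : succ[b]) live |= (*live_in)[s];  // union of successors
      live = gen[b] | (live & ~kill[b]);            // transfer function
      unsigned merged = (*live_in)[b] | live;       // UnionIsChanged, roughly
      if (merged != (*live_in)[b]) {
        (*live_in)[b] = merged;
        for (int p : pred[b]) { on_worklist[p] = true; any_queued = true; }
      }
    }
  }
}
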
 
diff --git a/src/hydrogen-environment-liveness.h b/src/hydrogen-environment-liveness.h
index 484e56d..248ec5c 100644
--- a/src/hydrogen-environment-liveness.h
+++ b/src/hydrogen-environment-liveness.h
@@ -43,11 +43,11 @@
 // the last lookup that refers to them.
 // Slots are identified by their index and only affected if whitelisted in
 // HOptimizedGraphBuilder::IsEligibleForEnvironmentLivenessAnalysis().
-class EnvironmentSlotLivenessAnalyzer {
+class HEnvironmentLivenessAnalysisPhase : public HPhase {
  public:
-  explicit EnvironmentSlotLivenessAnalyzer(HGraph* graph);
+  explicit HEnvironmentLivenessAnalysisPhase(HGraph* graph);
 
-  void AnalyzeAndTrim();
+  void Run();
 
  private:
   void ZapEnvironmentSlot(int index, HSimulate* simulate);
@@ -56,14 +56,6 @@
   void UpdateLivenessAtBlockEnd(HBasicBlock* block, BitVector* live);
   void UpdateLivenessAtInstruction(HInstruction* instr, BitVector* live);
 
-  Zone* zone() { return &zone_; }
-
-  HGraph* graph_;
-  // Use a dedicated Zone for this phase, with a ZoneScope to ensure it
-  // gets freed.
-  Zone zone_;
-  ZoneScope zone_scope_;
-
   int block_count_;
 
   // Largest number of local variables in any environment in the graph
@@ -71,21 +63,23 @@
   int maximum_environment_size_;
 
   // Per-block data. All these lists are indexed by block_id.
-  ZoneList<BitVector*>* live_at_block_start_;
-  ZoneList<HSimulate*>* first_simulate_;
-  ZoneList<BitVector*>* first_simulate_invalid_for_index_;
+  ZoneList<BitVector*> live_at_block_start_;
+  ZoneList<HSimulate*> first_simulate_;
+  ZoneList<BitVector*> first_simulate_invalid_for_index_;
 
   // List of all HEnvironmentMarker instructions for quick iteration/deletion.
   // It is populated during the first pass over the graph, controlled by
   // |collect_markers_|.
-  ZoneList<HEnvironmentMarker*>* markers_;
+  ZoneList<HEnvironmentMarker*> markers_;
   bool collect_markers_;
 
   // Keeps track of the last simulate seen, as well as the environment slots
   // for which a new live range has started since (so they must not be zapped
   // in that simulate when the end of another live range of theirs is found).
   HSimulate* last_simulate_;
-  BitVector* went_live_since_last_simulate_;
+  BitVector went_live_since_last_simulate_;
+
+  DISALLOW_COPY_AND_ASSIGN(HEnvironmentLivenessAnalysisPhase);
 };
 
 
diff --git a/src/hydrogen-gvn.cc b/src/hydrogen-gvn.cc
index aa2dff7..a277da8 100644
--- a/src/hydrogen-gvn.cc
+++ b/src/hydrogen-gvn.cc
@@ -361,41 +361,39 @@
 }
 
 
-HGlobalValueNumberer::HGlobalValueNumberer(HGraph* graph, CompilationInfo* info)
-      : graph_(graph),
-        info_(info),
+HGlobalValueNumberingPhase::HGlobalValueNumberingPhase(HGraph* graph)
+      : HPhase("H_Global value numbering", graph),
         removed_side_effects_(false),
-        block_side_effects_(graph->blocks()->length(), graph->zone()),
-        loop_side_effects_(graph->blocks()->length(), graph->zone()),
-        visited_on_paths_(graph->zone(), graph->blocks()->length()) {
+        block_side_effects_(graph->blocks()->length(), zone()),
+        loop_side_effects_(graph->blocks()->length(), zone()),
+        visited_on_paths_(zone(), graph->blocks()->length()) {
     ASSERT(!AllowHandleAllocation::IsAllowed());
-    block_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
-                                 graph_->zone());
-    loop_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
-                                graph_->zone());
+    block_side_effects_.AddBlock(GVNFlagSet(), graph->blocks()->length(),
+                                 zone());
+    loop_side_effects_.AddBlock(GVNFlagSet(), graph->blocks()->length(),
+                                zone());
   }
 
-bool HGlobalValueNumberer::Analyze() {
+void HGlobalValueNumberingPhase::Analyze() {
   removed_side_effects_ = false;
   ComputeBlockSideEffects();
   if (FLAG_loop_invariant_code_motion) {
     LoopInvariantCodeMotion();
   }
   AnalyzeGraph();
-  return removed_side_effects_;
 }
 
 
-void HGlobalValueNumberer::ComputeBlockSideEffects() {
+void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
   // The Analyze phase of GVN can be called multiple times. Clear loop side
   // effects before computing them to erase the contents from previous Analyze
   // passes.
   for (int i = 0; i < loop_side_effects_.length(); ++i) {
     loop_side_effects_[i].RemoveAll();
   }
-  for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
+  for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
     // Compute side effects for the block.
-    HBasicBlock* block = graph_->blocks()->at(i);
+    HBasicBlock* block = graph()->blocks()->at(i);
     HInstruction* instr = block->first();
     int id = block->block_id();
     GVNFlagSet side_effects;
@@ -512,11 +510,11 @@
 }
 
 
-void HGlobalValueNumberer::LoopInvariantCodeMotion() {
+void HGlobalValueNumberingPhase::LoopInvariantCodeMotion() {
   TRACE_GVN_1("Using optimistic loop invariant code motion: %s\n",
-              graph_->use_optimistic_licm() ? "yes" : "no");
-  for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
-    HBasicBlock* block = graph_->blocks()->at(i);
+              graph()->use_optimistic_licm() ? "yes" : "no");
+  for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
     if (block->IsLoopHeader()) {
       GVNFlagSet side_effects = loop_side_effects_[block->block_id()];
       TRACE_GVN_2("Try loop invariant motion for block B%d %s\n",
@@ -527,7 +525,7 @@
       GVNFlagSet accumulated_first_time_changes;
       HBasicBlock* last = block->loop_information()->GetLastBackEdge();
       for (int j = block->block_id(); j <= last->block_id(); ++j) {
-        ProcessLoopBlock(graph_->blocks()->at(j), block, side_effects,
+        ProcessLoopBlock(graph()->blocks()->at(j), block, side_effects,
                          &accumulated_first_time_depends,
                          &accumulated_first_time_changes);
       }
@@ -536,7 +534,7 @@
 }
 
 
-void HGlobalValueNumberer::ProcessLoopBlock(
+void HGlobalValueNumberingPhase::ProcessLoopBlock(
     HBasicBlock* block,
     HBasicBlock* loop_header,
     GVNFlagSet loop_kills,
@@ -601,20 +599,21 @@
 }
 
 
-bool HGlobalValueNumberer::AllowCodeMotion() {
+bool HGlobalValueNumberingPhase::AllowCodeMotion() {
   return info()->IsStub() || info()->opt_count() + 1 < FLAG_max_opt_count;
 }
 
 
-bool HGlobalValueNumberer::ShouldMove(HInstruction* instr,
-                                      HBasicBlock* loop_header) {
+bool HGlobalValueNumberingPhase::ShouldMove(HInstruction* instr,
+                                            HBasicBlock* loop_header) {
   // If we've disabled code motion or we're in a block that unconditionally
   // deoptimizes, don't move any instructions.
   return AllowCodeMotion() && !instr->block()->IsDeoptimizing();
 }
 
 
-GVNFlagSet HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
+GVNFlagSet
+HGlobalValueNumberingPhase::CollectSideEffectsOnPathsToDominatedBlock(
     HBasicBlock* dominator, HBasicBlock* dominated) {
   GVNFlagSet side_effects;
   for (int i = 0; i < dominated->predecessors()->length(); ++i) {
@@ -754,8 +753,8 @@
 // into a loop to avoid stack overflows.
 // The logical "stack frames" of the recursion are kept in a list of
 // GvnBasicBlockState instances.
-void HGlobalValueNumberer::AnalyzeGraph() {
-  HBasicBlock* entry_block = graph_->entry_block();
+void HGlobalValueNumberingPhase::AnalyzeGraph() {
+  HBasicBlock* entry_block = graph()->entry_block();
   HValueMap* entry_map = new(zone()) HValueMap(zone());
   GvnBasicBlockState* current =
       GvnBasicBlockState::CreateEntry(zone(), entry_block, entry_map);
@@ -826,7 +825,8 @@
 
     HBasicBlock* dominator_block;
     GvnBasicBlockState* next =
-        current->next_in_dominator_tree_traversal(zone(), &dominator_block);
+        current->next_in_dominator_tree_traversal(zone(),
+                                                  &dominator_block);
 
     if (next != NULL) {
       HBasicBlock* dominated = next->block();
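Across this file the renamed phase drops its own graph_/info_ fields and uses accessors inherited from its base class. A hedged sketch of the base-class contract being assumed (the real HPhase is declared in hydrogen.h and is not part of this patch):

  // Hypothetical shape; only the accessors used above are sketched.
  class HPhase {
   public:
    HPhase(const char* name, HGraph* graph) : name_(name), graph_(graph) { }
    HGraph* graph() const { return graph_; }
    Zone* zone() const;             // zone used for phase-local allocation
    CompilationInfo* info() const;  // used by AllowCodeMotion() above
   private:
    const char* name_;
    HGraph* graph_;
  };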
diff --git a/src/hydrogen-gvn.h b/src/hydrogen-gvn.h
index c39765a..66224e4 100644
--- a/src/hydrogen-gvn.h
+++ b/src/hydrogen-gvn.h
@@ -76,14 +76,24 @@
 };
 
 
-class HGlobalValueNumberer BASE_EMBEDDED {
+// Perform common subexpression elimination and loop-invariant code motion.
+class HGlobalValueNumberingPhase : public HPhase {
  public:
-  HGlobalValueNumberer(HGraph* graph, CompilationInfo* info);
+  explicit HGlobalValueNumberingPhase(HGraph* graph);
 
-  // Returns true if values with side effects are removed.
-  bool Analyze();
+  void Run() {
+    Analyze();
+    // Trigger a second analysis pass to further eliminate duplicate values
+    // that could only be discovered by removing side-effect-generating
+    // instructions during the first pass.
+    if (FLAG_smi_only_arrays && removed_side_effects_) {
+      Analyze();
+      ASSERT(!removed_side_effects_);
+    }
+  }
 
  private:
+  void Analyze();
   GVNFlagSet CollectSideEffectsOnPathsToDominatedBlock(
       HBasicBlock* dominator,
       HBasicBlock* dominated);
@@ -98,12 +108,6 @@
   bool AllowCodeMotion();
   bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
 
-  HGraph* graph() { return graph_; }
-  CompilationInfo* info() { return info_; }
-  Zone* zone() const { return graph_->zone(); }
-
-  HGraph* graph_;
-  CompilationInfo* info_;
   bool removed_side_effects_;
 
   // A map of block IDs to their side effects.
@@ -115,6 +119,8 @@
   // Used when collecting side effects on paths from dominator to
   // dominated.
   SparseSet visited_on_paths_;
+
+  DISALLOW_COPY_AND_ASSIGN(HGlobalValueNumberingPhase);
 };
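With Analyze() now private, callers drive the phase solely through Run(), which decides whether the second pass is needed. A hedged sketch of the intended call pattern (the actual call site is elsewhere in hydrogen.cc and outside this patch):

  // Sketch only: construct the phase on a graph and run it once; Run()
  // re-invokes Analyze() if the first pass removed side effects.
  HGlobalValueNumberingPhase gvn_phase(graph);
  gvn_phase.Run();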
 
 
diff --git a/src/hydrogen-infer-representation.cc b/src/hydrogen-infer-representation.cc
new file mode 100644
index 0000000..8439ad8
--- /dev/null
+++ b/src/hydrogen-infer-representation.cc
@@ -0,0 +1,173 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-infer-representation.h"
+
+namespace v8 {
+namespace internal {
+
+void HInferRepresentationPhase::AddToWorklist(HValue* current) {
+  if (current->representation().IsTagged()) return;
+  if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return;
+  if (in_worklist_.Contains(current->id())) return;
+  worklist_.Add(current, zone());
+  in_worklist_.Add(current->id());
+}
+
+
+void HInferRepresentationPhase::Run() {
+  // (1) Initialize bit vectors and count real uses. Each phi gets a
+  // bit-vector of length <number of phis>.
+  const ZoneList<HPhi*>* phi_list = graph()->phi_list();
+  int phi_count = phi_list->length();
+  ZoneList<BitVector*> connected_phis(phi_count, zone());
+  for (int i = 0; i < phi_count; ++i) {
+    phi_list->at(i)->InitRealUses(i);
+    BitVector* connected_set = new(zone()) BitVector(phi_count, zone());
+    connected_set->Add(i);
+    connected_phis.Add(connected_set, zone());
+  }
+
+  // (2) Do a fixed point iteration to find the set of connected phis.  A
+  // phi is connected to another phi if its value is used either directly or
+  // indirectly through a transitive closure of the def-use relation.
+  bool change = true;
+  while (change) {
+    change = false;
+    // We normally have far more "forward edges" than "backward edges",
+    // so we terminate faster when we walk backwards.
+    for (int i = phi_count - 1; i >= 0; --i) {
+      HPhi* phi = phi_list->at(i);
+      for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+        HValue* use = it.value();
+        if (use->IsPhi()) {
+          int id = HPhi::cast(use)->phi_id();
+          if (connected_phis[i]->UnionIsChanged(*connected_phis[id]))
+            change = true;
+        }
+      }
+    }
+  }
+
+  // Set truncation flags for groups of connected phis. This is a conservative
+  // approximation; the flag will be properly re-computed after representations
+  // have been determined.
+  if (phi_count > 0) {
+    BitVector done(phi_count, zone());
+    for (int i = 0; i < phi_count; ++i) {
+      if (done.Contains(i)) continue;
+
+      // Check if all uses of all connected phis in this group are truncating.
+      bool all_uses_everywhere_truncating = true;
+      for (BitVector::Iterator it(connected_phis[i]);
+           !it.Done();
+           it.Advance()) {
+        int index = it.Current();
+        all_uses_everywhere_truncating &=
+            phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToInt32);
+        done.Add(index);
+      }
+      if (all_uses_everywhere_truncating) {
+        continue;  // Great, nothing to do.
+      }
+      // Clear truncation flag of this group of connected phis.
+      for (BitVector::Iterator it(connected_phis[i]);
+           !it.Done();
+           it.Advance()) {
+        int index = it.Current();
+        phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToInt32);
+      }
+    }
+  }
+
+  // Simplify constant phi inputs where possible.
+  // This step relies on the kTruncatingToInt32 flags computed above.
+  for (int i = 0; i < phi_count; ++i) {
+    phi_list->at(i)->SimplifyConstantInputs();
+  }
+
+  // Use the phi reachability information from step 2 to
+  // sum up the non-phi use counts of all connected phis.
+  for (int i = 0; i < phi_count; ++i) {
+    HPhi* phi = phi_list->at(i);
+    for (BitVector::Iterator it(connected_phis[i]);
+         !it.Done();
+         it.Advance()) {
+      int index = it.Current();
+      HPhi* it_use = phi_list->at(index);
+      if (index != i) phi->AddNonPhiUsesFrom(it_use);  // Don't count twice.
+    }
+  }
+
+  // Initialize the worklist with all phis and instructions.
+  for (int i = 0; i < graph()->blocks()->length(); ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    const ZoneList<HPhi*>* phis = block->phis();
+    for (int j = 0; j < phis->length(); ++j) {
+      AddToWorklist(phis->at(j));
+    }
+
+    HInstruction* current = block->first();
+    while (current != NULL) {
+      AddToWorklist(current);
+      current = current->next();
+    }
+  }
+
+  // Do a fixed point iteration, trying to improve representations.
+  while (!worklist_.is_empty()) {
+    HValue* current = worklist_.RemoveLast();
+    in_worklist_.Remove(current->id());
+    current->InferRepresentation(this);
+  }
+
+  // Lastly: any instruction that we don't have representation information
+  // for defaults to Tagged.
+  for (int i = 0; i < graph()->blocks()->length(); ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    const ZoneList<HPhi*>* phis = block->phis();
+    for (int j = 0; j < phis->length(); ++j) {
+      HPhi* phi = phis->at(j);
+      if (phi->representation().IsNone()) {
+        phi->ChangeRepresentation(Representation::Tagged());
+      }
+    }
+    for (HInstruction* current = block->first();
+         current != NULL; current = current->next()) {
+      if (current->representation().IsNone() &&
+          current->CheckFlag(HInstruction::kFlexibleRepresentation)) {
+        if (current->CheckFlag(HInstruction::kCannotBeTagged)) {
+          current->ChangeRepresentation(Representation::Double());
+        } else {
+          current->ChangeRepresentation(Representation::Tagged());
+        }
+      }
+    }
+  }
+}
+
+} }  // namespace v8::internal
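The heart of Run() is a monotone worklist fixed point: a value's representation only ever becomes more general, so the iteration must terminate. A minimal self-contained sketch of the same pattern, with an integer "level" standing in for representation generality:

  #include <vector>

  struct Node {
    int level;                 // stand-in for representation generality
    std::vector<Node*> uses;
  };

  void FixedPoint(std::vector<Node*> worklist) {
    while (!worklist.empty()) {
      Node* n = worklist.back();
      worklist.pop_back();
      for (size_t i = 0; i < n->uses.size(); ++i) {
        Node* use = n->uses[i];
        if (use->level < n->level) {  // monotone update only
          use->level = n->level;
          worklist.push_back(use);    // its own uses may now need updating
        }
      }
    }
  }

The real phase additionally keeps the in_worklist_ bit vector so that each value sits in the worklist at most once.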
diff --git a/src/hydrogen-infer-representation.h b/src/hydrogen-infer-representation.h
new file mode 100644
index 0000000..7c60569
--- /dev/null
+++ b/src/hydrogen-infer-representation.h
@@ -0,0 +1,57 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_INFER_REPRESENTATION_H_
+#define V8_HYDROGEN_INFER_REPRESENTATION_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HInferRepresentationPhase : public HPhase {
+ public:
+  explicit HInferRepresentationPhase(HGraph* graph)
+      : HPhase("H_Infer representations", graph),
+        worklist_(8, zone()),
+        in_worklist_(graph->GetMaximumValueID(), zone()) { }
+
+  void Run();
+  void AddToWorklist(HValue* current);
+
+ private:
+  ZoneList<HValue*> worklist_;
+  BitVector in_worklist_;
+
+  DISALLOW_COPY_AND_ASSIGN(HInferRepresentationPhase);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_HYDROGEN_INFER_REPRESENTATION_H_
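The in_worklist_ bit vector is sized by GetMaximumValueID(), so HValue ids index it directly and duplicate queuing is a single bit test. Hedged usage sketch (the real invocation lives in hydrogen.cc):

  // Sketch only: the phase is constructed on a graph and run once.
  HInferRepresentationPhase infer(graph);
  infer.Run();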
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 170f5ed..98fa93f 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -29,7 +29,7 @@
 
 #include "double.h"
 #include "factory.h"
-#include "hydrogen.h"
+#include "hydrogen-infer-representation.h"
 
 #if V8_TARGET_ARCH_IA32
 #include "ia32/lithium-ia32.h"
@@ -78,7 +78,7 @@
 }
 
 
-void HValue::InferRepresentation(HInferRepresentation* h_infer) {
+void HValue::InferRepresentation(HInferRepresentationPhase* h_infer) {
   ASSERT(CheckFlag(kFlexibleRepresentation));
   Representation new_rep = RepresentationFromInputs();
   UpdateRepresentation(new_rep, h_infer, "inputs");
@@ -124,10 +124,11 @@
 
 
 void HValue::UpdateRepresentation(Representation new_rep,
-                                  HInferRepresentation* h_infer,
+                                  HInferRepresentationPhase* h_infer,
                                   const char* reason) {
   Representation r = representation();
   if (new_rep.is_more_general_than(r)) {
+    if (CheckFlag(kCannotBeTagged) && new_rep.IsTagged()) return;
     if (FLAG_trace_representation) {
       PrintF("Changing #%d %s representation %s -> %s based on %s\n",
              id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
@@ -138,7 +139,7 @@
 }
 
 
-void HValue::AddDependantsToWorklist(HInferRepresentation* h_infer) {
+void HValue::AddDependantsToWorklist(HInferRepresentationPhase* h_infer) {
   for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
     h_infer->AddToWorklist(it.value());
   }
@@ -1149,14 +1150,18 @@
 }
 
 
-void HBoundsCheck::InferRepresentation(HInferRepresentation* h_infer) {
+void HBoundsCheck::InferRepresentation(HInferRepresentationPhase* h_infer) {
   ASSERT(CheckFlag(kFlexibleRepresentation));
   HValue* actual_index = index()->ActualValue();
   HValue* actual_length = length()->ActualValue();
   Representation index_rep = actual_index->representation();
   Representation length_rep = actual_length->representation();
-  if (index_rep.IsTagged()) index_rep = Representation::Smi();
-  if (length_rep.IsTagged()) length_rep = Representation::Smi();
+  if (index_rep.IsTagged() && actual_index->type().IsSmi()) {
+    index_rep = Representation::Smi();
+  }
+  if (length_rep.IsTagged() && actual_length->type().IsSmi()) {
+    length_rep = Representation::Smi();
+  }
   Representation r = index_rep.generalize(length_rep);
   if (r.is_more_general_than(Representation::Integer32())) {
     r = Representation::Integer32();
@@ -1220,6 +1225,13 @@
 }
 
 
+void HCallNewArray::PrintDataTo(StringStream* stream) {
+  stream->Add(ElementsKindToString(elements_kind()));
+  stream->Add(" ");
+  HBinaryCall::PrintDataTo(stream);
+}
+
+
 void HCallRuntime::PrintDataTo(StringStream* stream) {
   stream->Add("%o ", *name());
   stream->Add("#%d", argument_count());
@@ -1276,20 +1288,26 @@
 
 Representation HBranch::observed_input_representation(int index) {
   static const ToBooleanStub::Types tagged_types(
-      ToBooleanStub::UNDEFINED |
       ToBooleanStub::NULL_TYPE |
       ToBooleanStub::SPEC_OBJECT |
       ToBooleanStub::STRING |
       ToBooleanStub::SYMBOL);
   if (expected_input_types_.ContainsAnyOf(tagged_types)) {
     return Representation::Tagged();
-  } else if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
-    return Representation::Double();
-  } else if (expected_input_types_.Contains(ToBooleanStub::SMI)) {
-    return Representation::Smi();
-  } else {
-    return Representation::None();
   }
+  if (expected_input_types_.Contains(ToBooleanStub::UNDEFINED)) {
+    if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
+      return Representation::Double();
+    }
+    return Representation::Tagged();
+  }
+  if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
+    return Representation::Double();
+  }
+  if (expected_input_types_.Contains(ToBooleanStub::SMI)) {
+    return Representation::Smi();
+  }
+  return Representation::None();
 }
 
 
@@ -1754,9 +1772,12 @@
 
 
 Range* HPhi::InferRange(Zone* zone) {
-  if (representation().IsInteger32()) {
+  Representation r = representation();
+  if (r.IsSmiOrInteger32()) {
     if (block()->IsLoopHeader()) {
-      Range* range = new(zone) Range(kMinInt, kMaxInt);
+      Range* range = r.IsSmi()
+          ? new(zone) Range(Smi::kMinValue, Smi::kMaxValue)
+          : new(zone) Range(kMinInt, kMaxInt);
       return range;
     } else {
       Range* range = OperandAt(0)->range()->Copy(zone);
@@ -2310,7 +2331,7 @@
 }
 
 
-void HBinaryOperation::InferRepresentation(HInferRepresentation* h_infer) {
+void HBinaryOperation::InferRepresentation(HInferRepresentationPhase* h_infer) {
   ASSERT(CheckFlag(kFlexibleRepresentation));
   Representation new_rep = RepresentationFromInputs();
   UpdateRepresentation(new_rep, h_infer, "inputs");
@@ -2373,7 +2394,7 @@
 }
 
 
-void HMathMinMax::InferRepresentation(HInferRepresentation* h_infer) {
+void HMathMinMax::InferRepresentation(HInferRepresentationPhase* h_infer) {
   ASSERT(CheckFlag(kFlexibleRepresentation));
   Representation new_rep = RepresentationFromInputs();
   UpdateRepresentation(new_rep, h_infer, "inputs");
@@ -2552,7 +2573,8 @@
 }
 
 
-void HCompareIDAndBranch::InferRepresentation(HInferRepresentation* h_infer) {
+void HCompareIDAndBranch::InferRepresentation(
+    HInferRepresentationPhase* h_infer) {
   Representation left_rep = left()->representation();
   Representation right_rep = right()->representation();
   Representation observed_left = observed_input_representation(0);
@@ -3051,9 +3073,8 @@
 }
 
 
-HType HCheckNonSmi::CalculateInferredType() {
-  // TODO(kasperl): Is there any way to signal that this isn't a smi?
-  return HType::Tagged();
+HType HCheckHeapObject::CalculateInferredType() {
+  return HType::NonPrimitive();
 }
 
 
@@ -3143,6 +3164,11 @@
 }
 
 
+HType HAllocateObject::CalculateInferredType() {
+  return HType::JSObject();
+}
+
+
 HType HAllocate::CalculateInferredType() {
   return type_;
 }
@@ -3323,10 +3349,9 @@
     HConstant* c_right = HConstant::cast(right);
     HConstant* c_left = HConstant::cast(left);
     if (c_left->HasStringValue() && c_right->HasStringValue()) {
-      Factory* factory = Isolate::Current()->factory();
-      return new(zone) HConstant(factory->NewConsString(c_left->StringValue(),
-                                                        c_right->StringValue()),
-                                 Representation::Tagged());
+      Handle<String> concat = zone->isolate()->factory()->NewFlatConcatString(
+          c_left->StringValue(), c_right->StringValue());
+      return new(zone) HConstant(concat, Representation::Tagged());
     }
   }
   return new(zone) HStringAdd(context, left, right);
@@ -3483,8 +3508,7 @@
                         HValue* context,
                         HValue* left,
                         HValue* right,
-                        bool has_fixed_right_arg,
-                        int fixed_right_arg_value) {
+                        Maybe<int> fixed_right_arg) {
   if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
     HConstant* c_left = HConstant::cast(left);
     HConstant* c_right = HConstant::cast(right);
@@ -3503,11 +3527,7 @@
       }
     }
   }
-  return new(zone) HMod(context,
-                        left,
-                        right,
-                        has_fixed_right_arg,
-                        fixed_right_arg_value);
+  return new(zone) HMod(context, left, right, fixed_right_arg);
 }
 
 
@@ -3659,7 +3679,7 @@
 }
 
 
-void HPhi::InferRepresentation(HInferRepresentation* h_infer) {
+void HPhi::InferRepresentation(HInferRepresentationPhase* h_infer) {
   ASSERT(CheckFlag(kFlexibleRepresentation));
   Representation new_rep = RepresentationFromInputs();
   UpdateRepresentation(new_rep, h_infer, "inputs");
@@ -3723,7 +3743,7 @@
 }
 
 
-void HCheckNonSmi::Verify() {
+void HCheckHeapObject::Verify() {
   HInstruction::Verify();
   ASSERT(HasNoUses());
 }
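The reworked HBranch::observed_input_representation above deserves a closer look: combining UNDEFINED with HEAP_NUMBER now yields Double, letting undefined flow into the branch as NaN. A hypothetical re-statement of the new decision order, using a plain bitmask in place of ToBooleanStub::Types (all names here are illustrative):

  enum TypeBit {
    kUndefined = 1 << 0, kNullType = 1 << 1, kSpecObject = 1 << 2,
    kString = 1 << 3, kSymbol = 1 << 4, kHeapNumber = 1 << 5, kSmi = 1 << 6
  };
  enum Rep { kNone, kSmiRep, kDouble, kTagged };

  static Rep ObservedRep(unsigned types) {
    if (types & (kNullType | kSpecObject | kString | kSymbol)) return kTagged;
    if (types & kUndefined) {
      // Undefined can participate in a double branch as NaN (cf. the new
      // kAllowUndefinedAsNaN flag on HBranch); otherwise stay tagged.
      return (types & kHeapNumber) ? kDouble : kTagged;
    }
    if (types & kHeapNumber) return kDouble;
    if (types & kSmi) return kSmiRep;
    return kNone;
  }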
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 7554fd8..bb74687 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -45,7 +45,7 @@
 // Forward declarations.
 class HBasicBlock;
 class HEnvironment;
-class HInferRepresentation;
+class HInferRepresentationPhase;
 class HInstruction;
 class HLoopInformation;
 class HValue;
@@ -66,6 +66,7 @@
   V(AccessArgumentsAt)                         \
   V(Add)                                       \
   V(Allocate)                                  \
+  V(AllocateObject)                            \
   V(ApplyArguments)                            \
   V(ArgumentsElements)                         \
   V(ArgumentsLength)                           \
@@ -88,9 +89,9 @@
   V(CallStub)                                  \
   V(Change)                                    \
   V(CheckFunction)                             \
+  V(CheckHeapObject)                           \
   V(CheckInstanceType)                         \
   V(CheckMaps)                                 \
-  V(CheckNonSmi)                               \
   V(CheckPrototypeMaps)                        \
   V(ClampToUint8)                              \
   V(ClassOfTestAndBranch)                      \
@@ -433,7 +434,7 @@
 
   bool IsHeapObject() const {
     ASSERT(type_ != kUninitialized);
-    return IsHeapNumber() || IsString() || IsNonPrimitive();
+    return IsHeapNumber() || IsString() || IsBoolean() || IsNonPrimitive();
   }
 
   static HType TypeFromValue(Handle<Object> value);
@@ -783,6 +784,7 @@
 
   enum Flag {
     kFlexibleRepresentation,
+    kCannotBeTagged,
     // Participate in Global Value Numbering, i.e. elimination of
     // unnecessary recomputations. If an instruction sets this flag, it must
     // implement DataEquals(), which will be used to determine if other
@@ -888,6 +890,7 @@
   Representation representation() const { return representation_; }
   void ChangeRepresentation(Representation r) {
     ASSERT(CheckFlag(kFlexibleRepresentation));
+    ASSERT(!CheckFlag(kCannotBeTagged) || !r.IsTagged());
     RepresentationChanged(r);
     representation_ = r;
     if (r.IsTagged()) {
@@ -915,6 +918,10 @@
     type_ = new_type;
   }
 
+  bool IsHeapObject() {
+    return representation_.IsHeapObject() || type_.IsHeapObject();
+  }
+
   // An operation needs to override this function iff:
   //   1) it can produce an int32 output.
   //   2) the true value of its output can potentially be minus zero.
@@ -1060,7 +1067,7 @@
     return Representation::None();
   }
   virtual Representation RequiredInputRepresentation(int index) = 0;
-  virtual void InferRepresentation(HInferRepresentation* h_infer);
+  virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
 
   // This gives the instruction an opportunity to replace itself with an
   // instruction that does the same in some better way.  To replace an
@@ -1152,9 +1159,9 @@
   Representation RepresentationFromUses();
   Representation RepresentationFromUseRequirements();
   virtual void UpdateRepresentation(Representation new_rep,
-                                    HInferRepresentation* h_infer,
+                                    HInferRepresentationPhase* h_infer,
                                     const char* reason);
-  void AddDependantsToWorklist(HInferRepresentation* h_infer);
+  void AddDependantsToWorklist(HInferRepresentationPhase* h_infer);
 
   virtual void RepresentationChanged(Representation to) { }
 
@@ -1596,17 +1603,21 @@
   HBranch(HValue* value,
           HBasicBlock* true_target,
           HBasicBlock* false_target,
-          ToBooleanStub::Types expected_input_types = ToBooleanStub::no_types())
+          ToBooleanStub::Types expected_input_types = ToBooleanStub::Types())
       : HUnaryControlInstruction(value, true_target, false_target),
         expected_input_types_(expected_input_types) {
     ASSERT(true_target != NULL && false_target != NULL);
+    SetFlag(kAllowUndefinedAsNaN);
   }
   explicit HBranch(HValue* value)
-      : HUnaryControlInstruction(value, NULL, NULL) { }
+      : HUnaryControlInstruction(value, NULL, NULL) {
+    SetFlag(kAllowUndefinedAsNaN);
+  }
   HBranch(HValue* value, ToBooleanStub::Types expected_input_types)
       : HUnaryControlInstruction(value, NULL, NULL),
-        expected_input_types_(expected_input_types) { }
-
+        expected_input_types_(expected_input_types) {
+    SetFlag(kAllowUndefinedAsNaN);
+  }
 
   virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
@@ -2477,12 +2488,12 @@
 class HCallNewArray: public HCallNew {
  public:
   HCallNewArray(HValue* context, HValue* constructor, int argument_count,
-                Handle<Cell> type_cell)
+                Handle<Cell> type_cell, ElementsKind elements_kind)
       : HCallNew(context, constructor, argument_count),
-        type_cell_(type_cell) {
-    elements_kind_ = static_cast<ElementsKind>(
-        Smi::cast(type_cell->value())->value());
-  }
+        elements_kind_(elements_kind),
+        type_cell_(type_cell) {}
+
+  virtual void PrintDataTo(StringStream* stream);
 
   Handle<Cell> property_cell() const {
     return type_cell_;
@@ -2941,9 +2952,9 @@
 };
 
 
-class HCheckNonSmi: public HUnaryOperation {
+class HCheckHeapObject: public HUnaryOperation {
  public:
-  explicit HCheckNonSmi(HValue* value) : HUnaryOperation(value) {
+  explicit HCheckHeapObject(HValue* value) : HUnaryOperation(value) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
   }
@@ -2960,17 +2971,13 @@
 
   virtual HValue* Canonicalize() {
     HType value_type = value()->type();
-    if (!value_type.IsUninitialized() &&
-        (value_type.IsHeapNumber() ||
-         value_type.IsString() ||
-         value_type.IsBoolean() ||
-         value_type.IsNonPrimitive())) {
+    if (!value_type.IsUninitialized() && value_type.IsHeapObject()) {
       return NULL;
     }
     return this;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi)
+  DECLARE_CONCRETE_INSTRUCTION(CheckHeapObject)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -3068,7 +3075,7 @@
   virtual Representation RepresentationFromInputs();
 
   virtual Range* InferRange(Zone* zone);
-  virtual void InferRepresentation(HInferRepresentation* h_infer);
+  virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
   virtual Representation RequiredInputRepresentation(int index) {
     return representation();
   }
@@ -3490,12 +3497,12 @@
     return observed_input_representation_[index - 1];
   }
 
-  virtual void InferRepresentation(HInferRepresentation* h_infer);
+  virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
   virtual Representation RepresentationFromInputs();
   virtual void AssumeRepresentation(Representation r);
 
   virtual void UpdateRepresentation(Representation new_rep,
-                                    HInferRepresentation* h_infer,
+                                    HInferRepresentationPhase* h_infer,
                                     const char* reason) {
     // By default, binary operations don't handle Smis.
     if (new_rep.IsSmi()) {
@@ -3707,7 +3714,7 @@
                                       int scale = 0);
 
   virtual void PrintDataTo(StringStream* stream);
-  virtual void InferRepresentation(HInferRepresentation* h_infer);
+  virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
 
   HValue* index() { return OperandAt(0); }
   HValue* length() { return OperandAt(1); }
@@ -3804,7 +3811,7 @@
   }
 
   virtual void UpdateRepresentation(Representation new_rep,
-                                    HInferRepresentation* h_infer,
+                                    HInferRepresentationPhase* h_infer,
                                     const char* reason) {
     // We only generate either int32 or generic tagged bitwise operations.
     if (new_rep.IsSmi() || new_rep.IsDouble()) {
@@ -3939,7 +3946,7 @@
       observed_input_representation_[1] = right;
   }
 
-  virtual void InferRepresentation(HInferRepresentation* h_infer);
+  virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
 
   virtual Representation RequiredInputRepresentation(int index) {
     return representation();
@@ -4461,11 +4468,9 @@
                            HValue* context,
                            HValue* left,
                            HValue* right,
-                           bool has_fixed_right_arg,
-                           int fixed_right_arg_value);
+                           Maybe<int> fixed_right_arg);
 
-  bool has_fixed_right_arg() const { return has_fixed_right_arg_; }
-  int fixed_right_arg_value() const { return fixed_right_arg_value_; }
+  Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
 
   bool HasPowerOf2Divisor() {
     if (right()->IsConstant() &&
@@ -4492,17 +4497,14 @@
   HMod(HValue* context,
        HValue* left,
        HValue* right,
-       bool has_fixed_right_arg,
-       int fixed_right_arg_value)
+       Maybe<int> fixed_right_arg)
       : HArithmeticBinaryOperation(context, left, right),
-        has_fixed_right_arg_(has_fixed_right_arg),
-        fixed_right_arg_value_(fixed_right_arg_value) {
+        fixed_right_arg_(fixed_right_arg) {
     SetFlag(kCanBeDivByZero);
     SetFlag(kCanOverflow);
   }
 
-  const bool has_fixed_right_arg_;
-  const int fixed_right_arg_value_;
+  const Maybe<int> fixed_right_arg_;
 };
 
 
@@ -4561,7 +4563,7 @@
     return RequiredInputRepresentation(index);
   }
 
-  virtual void InferRepresentation(HInferRepresentation* h_infer);
+  virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
 
   virtual Representation RepresentationFromInputs() {
     Representation left_rep = left()->representation();
@@ -4927,6 +4929,48 @@
 };
 
 
+class HAllocateObject: public HTemplateInstruction<1> {
+ public:
+  HAllocateObject(HValue* context, Handle<JSFunction> constructor)
+      : constructor_(constructor) {
+    SetOperandAt(0, context);
+    set_representation(Representation::Tagged());
+    SetGVNFlag(kChangesNewSpacePromotion);
+    constructor_initial_map_ = constructor->has_initial_map()
+        ? Handle<Map>(constructor->initial_map())
+        : Handle<Map>::null();
+    // Once in-object slack tracking has finished, the instance size and
+    // property counts stay fixed, so the object can be allocated directly.
+    ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
+  }
+
+  // Maximum instance size for which allocations will be inlined.
+  static const int kMaxSize = 64 * kPointerSize;
+
+  HValue* context() { return OperandAt(0); }
+  Handle<JSFunction> constructor() { return constructor_; }
+  Handle<Map> constructor_initial_map() { return constructor_initial_map_; }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+  virtual Handle<Map> GetMonomorphicJSObjectMap() {
+    ASSERT(!constructor_initial_map_.is_null());
+    return constructor_initial_map_;
+  }
+  virtual HType CalculateInferredType();
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateObject)
+
+ private:
+  // TODO(svenpanne) Might be safe, but leave it out until we know for sure.
+  //  virtual bool IsDeletable() const { return true; }
+
+  Handle<JSFunction> constructor_;
+  Handle<Map> constructor_initial_map_;
+};
+
+
 class HAllocate: public HTemplateInstruction<2> {
  public:
   enum Flags {
@@ -4945,9 +4989,6 @@
     SetGVNFlag(kChangesNewSpacePromotion);
   }
 
-  // Maximum instance size for which allocations will be inlined.
-  static const int kMaxInlineSize = 64 * kPointerSize;
-
   static Flags DefaultFlags() {
     return CAN_ALLOCATE_IN_NEW_SPACE;
   }
@@ -4972,14 +5013,6 @@
     }
   }
 
-  virtual Handle<Map> GetMonomorphicJSObjectMap() {
-    return known_initial_map_;
-  }
-
-  void set_known_initial_map(Handle<Map> known_initial_map) {
-    known_initial_map_ = known_initial_map;
-  }
-
   virtual HType CalculateInferredType();
 
   bool CanAllocateInNewSpace() const {
@@ -5014,7 +5047,6 @@
  private:
   HType type_;
   Flags flags_;
-  Handle<Map> known_initial_map_;
 };
 
 
@@ -5058,6 +5090,7 @@
         new_space_dominator);
   }
   if (object != new_space_dominator) return true;
+  if (object->IsAllocateObject()) return false;
   if (object->IsAllocate()) {
     return !HAllocate::cast(object)->GuaranteedInNewSpace();
   }
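The HMod change collapses the bool/int pair into a single Maybe<int>. A minimal sketch of the wrapper semantics being assumed (V8 carries such a template internally; this is not its verbatim definition):

  // Hedged sketch: either absent, or holding a T.
  template <class T>
  struct Maybe {
    Maybe() : has_value(false), value() { }
    explicit Maybe(T t) : has_value(true), value(t) { }
    bool has_value;
    T value;
  };

  // Usage as in HMod above: the two old fields
  //   bool has_fixed_right_arg_; int fixed_right_arg_value_;
  // become one:
  //   Maybe<int> fixed_right_arg_;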
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 58a58f9..d3b9aa8 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -35,6 +35,7 @@
 #include "full-codegen.h"
 #include "hashmap.h"
 #include "hydrogen-environment-liveness.h"
+#include "hydrogen-infer-representation.h"
 #include "lithium-allocator.h"
 #include "parser.h"
 #include "scopeinfo.h"
@@ -526,7 +527,7 @@
 
 
 void HGraph::Verify(bool do_full_verify) const {
-  Heap::RelocationLock(isolate()->heap());
+  Heap::RelocationLock relocation_lock(isolate()->heap());
   AllowHandleDereference allow_deref;
   AllowDeferredHandleDereference allow_deferred_deref;
   for (int i = 0; i < blocks_.length(); i++) {
@@ -803,7 +804,7 @@
     // so that the graph builder visits it and sees any live range extending
     // constructs within it.
     HConstant* constant_false = builder_->graph()->GetConstantFalse();
-    ToBooleanStub::Types boolean_type = ToBooleanStub::no_types();
+    ToBooleanStub::Types boolean_type = ToBooleanStub::Types();
     boolean_type.Add(ToBooleanStub::BOOLEAN);
     HBranch* branch =
         new(zone()) HBranch(constant_false, first_true_block_,
@@ -963,7 +964,7 @@
 HGraph* HGraphBuilder::CreateGraph() {
   graph_ = new(zone()) HGraph(info_);
   if (FLAG_hydrogen_stats) isolate()->GetHStatistics()->Initialize(info_);
-  HPhase phase("H_Block building", isolate());
+  CompilationPhase phase("H_Block building", info_);
   set_current_block(graph()->entry_block());
   if (!BuildGraph()) return NULL;
   graph()->FinalizeUniqueValueIds();
@@ -989,18 +990,10 @@
 }
 
 
-HBoundsCheck* HGraphBuilder::AddBoundsCheck(HValue* index, HValue* length) {
-  HBoundsCheck* result = new(graph()->zone()) HBoundsCheck(index, length);
-  AddInstruction(result);
-  return result;
-}
-
-
 HReturn* HGraphBuilder::AddReturn(HValue* value) {
   HValue* context = environment()->LookupContext();
   int num_parameters = graph()->info()->num_parameters();
-  HValue* params = AddInstruction(new(graph()->zone())
-      HConstant(num_parameters));
+  HValue* params = Add<HConstant>(num_parameters);
   HReturn* return_instruction = new(graph()->zone())
       HReturn(value, context, params);
   current_block()->FinishExit(return_instruction);
@@ -1024,11 +1017,9 @@
 }
 
 
-HValue* HGraphBuilder::BuildCheckNonSmi(HValue* obj) {
+HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
   if (obj->type().IsHeapObject()) return obj;
-  HCheckNonSmi* check = new(zone()) HCheckNonSmi(obj);
-  AddInstruction(check);
-  return check;
+  return Add<HCheckHeapObject>(obj);
 }
 
 
@@ -1052,7 +1043,7 @@
     ASSERT(val != NULL);
     switch (elements_kind) {
       case EXTERNAL_PIXEL_ELEMENTS: {
-        val = AddInstruction(new(zone) HClampToUint8(val));
+        val = Add<HClampToUint8>(val);
         break;
       }
       case EXTERNAL_BYTE_ELEMENTS:
@@ -1139,8 +1130,7 @@
   length_checker.IfCompare(length, key, Token::EQ);
   length_checker.Then();
 
-  HValue* current_capacity =
-      AddInstruction(new(zone) HFixedArrayBaseLength(elements));
+  HValue* current_capacity = Add<HFixedArrayBaseLength>(elements);
 
   IfBuilder capacity_checker(this);
 
@@ -1175,7 +1165,7 @@
 
   length_checker.Else();
 
-  AddBoundsCheck(key, length);
+  Add<HBoundsCheck>(key, length);
   environment()->Push(elements);
 
   length_checker.End();
@@ -1188,7 +1178,6 @@
                                                 HValue* elements,
                                                 ElementsKind kind,
                                                 HValue* length) {
-  Zone* zone = this->zone();
   Heap* heap = isolate()->heap();
 
   IfBuilder cow_checker(this);
@@ -1197,8 +1186,7 @@
                            Handle<Map>(heap->fixed_cow_array_map()));
   cow_checker.Then();
 
-  HValue* capacity =
-      AddInstruction(new(zone) HFixedArrayBaseLength(elements));
+  HValue* capacity = Add<HFixedArrayBaseLength>(elements);
 
   HValue* new_elements = BuildGrowElementsCapacity(object, elements,
                                                    kind, length, capacity);
@@ -1255,15 +1243,14 @@
         Representation::Smi());
     length->set_type(HType::Smi());
   } else {
-    length = AddInstruction(new(zone) HFixedArrayBaseLength(elements));
+    length = Add<HFixedArrayBaseLength>(elements);
   }
   HValue* checked_key = NULL;
   if (IsExternalArrayElementsKind(elements_kind)) {
     if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
       NoObservableSideEffectsScope no_effects(this);
       HLoadExternalArrayPointer* external_elements =
-          new(zone) HLoadExternalArrayPointer(elements);
-      AddInstruction(external_elements);
+          Add<HLoadExternalArrayPointer>(elements);
       IfBuilder length_checker(this);
       length_checker.IfCompare(key, length, Token::LT);
       length_checker.Then();
@@ -1280,10 +1267,9 @@
       return result;
     } else {
       ASSERT(store_mode == STANDARD_STORE);
-      checked_key = AddBoundsCheck(key, length);
+      checked_key = Add<HBoundsCheck>(key, length);
       HLoadExternalArrayPointer* external_elements =
-          new(zone) HLoadExternalArrayPointer(elements);
-      AddInstruction(external_elements);
+          Add<HLoadExternalArrayPointer>(elements);
       return AddInstruction(BuildExternalArrayElementAccess(
           external_elements, checked_key, val, mapcheck,
           elements_kind, is_store));
@@ -1298,8 +1284,7 @@
   // deopt, leaving the backing store in an invalid state.
   if (is_store && IsFastSmiElementsKind(elements_kind) &&
       !val->type().IsSmi()) {
-    val = AddInstruction(new(zone) HForceRepresentation(
-        val, Representation::Smi()));
+    val = Add<HForceRepresentation>(val, Representation::Smi());
   }
 
   if (IsGrowStoreMode(store_mode)) {
@@ -1308,7 +1293,7 @@
                                          length, key, is_js_array);
     checked_key = key;
   } else {
-    checked_key = AddBoundsCheck(key, length);
+    checked_key = Add<HBoundsCheck>(key, length);
 
     if (is_store && (fast_elements || fast_smi_only_elements)) {
       if (store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
@@ -1337,14 +1322,12 @@
 
   int elements_size = IsFastDoubleElementsKind(kind)
       ? kDoubleSize : kPointerSize;
-  HConstant* elements_size_value = new(zone) HConstant(elements_size);
-  AddInstruction(elements_size_value);
+  HConstant* elements_size_value = Add<HConstant>(elements_size);
   HValue* mul = AddInstruction(
                     HMul::New(zone, context, capacity, elements_size_value));
   mul->ClearFlag(HValue::kCanOverflow);
 
-  HConstant* header_size = new(zone) HConstant(FixedArray::kHeaderSize);
-  AddInstruction(header_size);
+  HConstant* header_size = Add<HConstant>(FixedArray::kHeaderSize);
   HValue* total_size = AddInstruction(
                            HAdd::New(zone, context, mul, header_size));
   total_size->ClearFlag(HValue::kCanOverflow);
@@ -1362,10 +1345,7 @@
     }
   }
 
-  HValue* elements =
-      AddInstruction(new(zone) HAllocate(context, total_size,
-                                         HType::JSArray(), flags));
-  return elements;
+  return Add<HAllocate>(context, total_size, HType::JSArray(), flags);
 }
 
 
@@ -1404,8 +1384,7 @@
   AddStore(array, HObjectAccess::ForMap(), array_map);
 
   HConstant* empty_fixed_array =
-      new(zone()) HConstant(isolate()->factory()->empty_fixed_array());
-  AddInstruction(empty_fixed_array);
+      Add<HConstant>(isolate()->factory()->empty_fixed_array());
 
   HObjectAccess access = HObjectAccess::ForPropertiesPointer();
   AddStore(array, access, empty_fixed_array);
@@ -1422,10 +1401,8 @@
     elements_location += AllocationSiteInfo::kSize;
   }
 
-  HInnerAllocatedObject* elements = new(zone()) HInnerAllocatedObject(
-      array, elements_location);
-  AddInstruction(elements);
-
+  HInnerAllocatedObject* elements =
+      Add<HInnerAllocatedObject>(array, elements_location);
   AddStore(array, HObjectAccess::ForElementsPointer(), elements);
   return elements;
 }
@@ -1449,7 +1426,7 @@
       HAdd::New(zone, context, half_old_capacity, old_capacity));
   new_capacity->ClearFlag(HValue::kCanOverflow);
 
-  HValue* min_growth = AddInstruction(new(zone) HConstant(16));
+  HValue* min_growth = Add<HConstant>(16);
 
   new_capacity = AddInstruction(
       HAdd::New(zone, context, new_capacity, min_growth));
@@ -1460,17 +1437,15 @@
 
 
 void HGraphBuilder::BuildNewSpaceArrayCheck(HValue* length, ElementsKind kind) {
-  Zone* zone = this->zone();
   Heap* heap = isolate()->heap();
   int element_size = IsFastDoubleElementsKind(kind) ? kDoubleSize
                                                     : kPointerSize;
   int max_size = heap->MaxRegularSpaceAllocationSize() / element_size;
   max_size -= JSArray::kSize / element_size;
-  HConstant* max_size_constant = new(zone) HConstant(max_size);
-  AddInstruction(max_size_constant);
+  HConstant* max_size_constant = Add<HConstant>(max_size);
   // Since we're forcing Integer32 representation for this HBoundsCheck,
   // there's no need to Smi-check the index.
-  AddInstruction(new(zone) HBoundsCheck(length, max_size_constant));
+  Add<HBoundsCheck>(length, max_size_constant);
 }
 
 
@@ -1506,10 +1481,9 @@
   Factory* factory = isolate()->factory();
 
   double nan_double = FixedDoubleArray::hole_nan_as_double();
-  Zone* zone = this->zone();
   HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind)
-      ? AddInstruction(new(zone) HConstant(factory->the_hole_value()))
-      : AddInstruction(new(zone) HConstant(nan_double));
+      ? Add<HConstant>(factory->the_hole_value())
+      : Add<HConstant>(nan_double);
 
   // Special loop unfolding case
   static const int kLoopUnfoldLimit = 4;
@@ -1536,15 +1510,15 @@
 
   if (unfold_loop) {
     for (int i = 0; i < initial_capacity; i++) {
-      HInstruction* key = AddInstruction(new(zone) HConstant(i));
-      AddInstruction(new(zone) HStoreKeyed(elements, key, hole, elements_kind));
+      HInstruction* key = Add<HConstant>(i);
+      Add<HStoreKeyed>(elements, key, hole, elements_kind);
     }
   } else {
     LoopBuilder builder(this, context, LoopBuilder::kPostIncrement);
 
     HValue* key = builder.BeginBody(from, to, Token::LT);
 
-    AddInstruction(new(zone) HStoreKeyed(elements, key, hole, elements_kind));
+    Add<HStoreKeyed>(elements, key, hole, elements_kind);
 
     builder.EndBody();
   }
@@ -1574,15 +1548,15 @@
 
   HValue* key = builder.BeginBody(graph()->GetConstant0(), length, Token::LT);
 
-  HValue* element =
-      AddInstruction(new(zone()) HLoadKeyed(from_elements, key, NULL,
-                                            from_elements_kind,
-                                            ALLOW_RETURN_HOLE));
+  HValue* element = Add<HLoadKeyed>(from_elements, key,
+                                    static_cast<HValue*>(NULL),
+                                    from_elements_kind,
+                                    ALLOW_RETURN_HOLE);
 
   ElementsKind holey_kind = IsFastSmiElementsKind(to_elements_kind)
       ? FAST_HOLEY_ELEMENTS : to_elements_kind;
-  HInstruction* holey_store = AddInstruction(
-      new(zone()) HStoreKeyed(to_elements, key, element, holey_kind));
+  HInstruction* holey_store = Add<HStoreKeyed>(to_elements, key,
+                                               element, holey_kind);
+  // Allow NaN hole values to be converted to their tagged counterparts.
   if (IsFastHoleyElementsKind(to_elements_kind)) {
     holey_store->SetFlag(HValue::kAllowUndefinedAsNaN);
@@ -1603,8 +1577,6 @@
                                               AllocationSiteMode mode,
                                               ElementsKind kind,
                                               int length) {
-  Zone* zone = this->zone();
-
   NoObservableSideEffectsScope no_effects(this);
 
   // All sizes here are multiples of kPointerSize.
@@ -1622,12 +1594,11 @@
   HAllocate::Flags allocate_flags = HAllocate::DefaultFlags(kind);
   // Allocate both the JS array and the elements array in one big
   // allocation. This avoids multiple limit checks.
-  HValue* size_in_bytes = AddInstruction(new(zone) HConstant(size));
-  HInstruction* object =
-      AddInstruction(new(zone) HAllocate(context,
-                                         size_in_bytes,
-                                         HType::JSObject(),
-                                         allocate_flags));
+  HValue* size_in_bytes = Add<HConstant>(size);
+  HInstruction* object = Add<HAllocate>(context,
+                                        size_in_bytes,
+                                        HType::JSObject(),
+                                        allocate_flags);
 
   // Copy the JS array part.
   for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
@@ -1646,8 +1617,7 @@
     // Get hold of the elements array of the boilerplate and setup the
     // elements pointer in the resulting object.
     HValue* boilerplate_elements = AddLoadElements(boilerplate);
-    HValue* object_elements =
-        AddInstruction(new(zone) HInnerAllocatedObject(object, elems_offset));
+    HValue* object_elements = Add<HInnerAllocatedObject>(object, elems_offset);
     AddStore(object, HObjectAccess::ForElementsPointer(), object_elements);
 
     // Copy the elements array header.
@@ -1661,16 +1631,10 @@
     // copying loops with constant length up to a given boundary and use this
     // helper here instead.
     for (int i = 0; i < length; i++) {
-      HValue* key_constant = AddInstruction(new(zone) HConstant(i));
-      HInstruction* value =
-          AddInstruction(new(zone) HLoadKeyed(boilerplate_elements,
-                                              key_constant,
-                                              NULL,
-                                              kind));
-      AddInstruction(new(zone) HStoreKeyed(object_elements,
-                                           key_constant,
-                                           value,
-                                           kind));
+      HValue* key_constant = Add<HConstant>(i);
+      HInstruction* value = Add<HLoadKeyed>(boilerplate_elements, key_constant,
+                                            static_cast<HValue*>(NULL), kind);
+      Add<HStoreKeyed>(object_elements, key_constant, value, kind);
     }
   }
 
@@ -1703,7 +1667,7 @@
     if_nil.Then();
     if_nil.Else();
     if (type->NumClasses() == 1) {
-      BuildCheckNonSmi(value);
+      BuildCheckHeapObject(value);
       // For ICs, the map checked below is a sentinel map that gets replaced by
       // the monomorphic map when the code is used as a template to generate a
       // new IC. For optimized functions, there is no sentinel map, the map
@@ -1721,9 +1685,8 @@
 HValue* HGraphBuilder::BuildCreateAllocationSiteInfo(HValue* previous_object,
                                                      int previous_object_size,
                                                      HValue* payload) {
-  HInnerAllocatedObject* alloc_site = new(zone())
-        HInnerAllocatedObject(previous_object, previous_object_size);
-  AddInstruction(alloc_site);
+  HInnerAllocatedObject* alloc_site = Add<HInnerAllocatedObject>(
+      previous_object, previous_object_size);
   Handle<Map> alloc_site_map(isolate()->heap()->allocation_site_info_map());
   AddStoreMapConstant(alloc_site, alloc_site_map);
   HObjectAccess access = HObjectAccess::ForAllocationSitePayload();
@@ -1734,8 +1697,7 @@
 
 HInstruction* HGraphBuilder::BuildGetNativeContext(HValue* context) {
   // Get the global context, then the native context
-  HInstruction* global_object = AddInstruction(new(zone())
-      HGlobalObject(context));
+  HInstruction* global_object = Add<HGlobalObject>(context);
   HObjectAccess access = HObjectAccess::ForJSObjectOffset(
       GlobalObject::kNativeContextOffset);
   return AddLoad(global_object, access);
@@ -1744,23 +1706,23 @@
 
 HInstruction* HGraphBuilder::BuildGetArrayFunction(HValue* context) {
   HInstruction* native_context = BuildGetNativeContext(context);
-  HInstruction* index = AddInstruction(new(zone())
-      HConstant(Context::ARRAY_FUNCTION_INDEX));
-
-  return AddInstruction(new (zone())
-      HLoadKeyed(native_context, index, NULL, FAST_ELEMENTS));
+  HInstruction* index =
+      Add<HConstant>(static_cast<int32_t>(Context::ARRAY_FUNCTION_INDEX));
+  return Add<HLoadKeyed>(
+      native_context, index, static_cast<HValue*>(NULL), FAST_ELEMENTS);
 }
 
 
 HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
-                                              ElementsKind kind,
-                                              HValue* allocation_site_payload,
-                                              bool disable_allocation_sites) :
+    ElementsKind kind,
+    HValue* allocation_site_payload,
+    HValue* constructor_function,
+    AllocationSiteOverrideMode override_mode) :
         builder_(builder),
         kind_(kind),
         allocation_site_payload_(allocation_site_payload),
-        constructor_function_(NULL) {
-  mode_ = disable_allocation_sites
+        constructor_function_(constructor_function) {
+  mode_ = override_mode == DISABLE_ALLOCATION_SITES
       ? DONT_TRACK_ALLOCATION_SITE
       : AllocationSiteInfo::GetMode(kind);
 }
@@ -1778,26 +1740,35 @@
 
 
 HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode(HValue* context) {
+  if (kind_ == GetInitialFastElementsKind()) {
+    // No need for a context lookup if kind_ matches the elements kind of
+    // the initial map; in that case the map can be loaded directly.
+    HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
+    HInstruction* load =
+        builder()->BuildLoadNamedField(constructor_function_,
+                                       access,
+                                       Representation::Tagged());
+    return builder()->AddInstruction(load);
+  }
+
   HInstruction* native_context = builder()->BuildGetNativeContext(context);
+  HInstruction* index = builder()->Add<HConstant>(
+      static_cast<int32_t>(Context::JS_ARRAY_MAPS_INDEX));
 
-  HInstruction* index = builder()->AddInstruction(new(zone())
-      HConstant(Context::JS_ARRAY_MAPS_INDEX));
+  HInstruction* map_array = builder()->Add<HLoadKeyed>(
+      native_context, index, static_cast<HValue*>(NULL), FAST_ELEMENTS);
 
-  HInstruction* map_array = builder()->AddInstruction(new(zone())
-      HLoadKeyed(native_context, index, NULL, FAST_ELEMENTS));
+  HInstruction* kind_index = builder()->Add<HConstant>(kind_);
 
-  HInstruction* kind_index = builder()->AddInstruction(new(zone())
-      HConstant(kind_));
-
-  return builder()->AddInstruction(new(zone())
-      HLoadKeyed(map_array, kind_index, NULL, FAST_ELEMENTS));
+  return builder()->Add<HLoadKeyed>(
+      map_array, kind_index, static_cast<HValue*>(NULL), FAST_ELEMENTS);
 }
 
 
 HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() {
   // Find the map near the constructor function
   HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
-  return AddInstruction(
+  return builder()->AddInstruction(
       builder()->BuildLoadNamedField(constructor_function_,
                                      access,
                                      Representation::Tagged()));
@@ -1820,18 +1791,17 @@
     base_size += FixedArray::kHeaderSize;
   }
 
-  HInstruction* elements_size_value = new(zone()) HConstant(elements_size());
-  AddInstruction(elements_size_value);
+  HInstruction* elements_size_value =
+      builder()->Add<HConstant>(elements_size());
   HInstruction* mul = HMul::New(zone(), context, length_node,
                                 elements_size_value);
   mul->ClearFlag(HValue::kCanOverflow);
-  AddInstruction(mul);
+  builder()->AddInstruction(mul);
 
-  HInstruction* base = new(zone()) HConstant(base_size);
-  AddInstruction(base);
+  HInstruction* base = builder()->Add<HConstant>(base_size);
   HInstruction* total_size = HAdd::New(zone(), context, base, mul);
   total_size->ClearFlag(HValue::kCanOverflow);
-  AddInstruction(total_size);
+  builder()->AddInstruction(total_size);
   return total_size;
 }
 
@@ -1846,16 +1816,13 @@
       ? FixedDoubleArray::SizeFor(initial_capacity())
       : FixedArray::SizeFor(initial_capacity());
 
-  HConstant* array_size = new(zone()) HConstant(base_size);
-  AddInstruction(array_size);
-  return array_size;
+  return builder()->Add<HConstant>(base_size);
 }
 
 
 HValue* HGraphBuilder::JSArrayBuilder::AllocateEmptyArray() {
   HValue* size_in_bytes = EstablishEmptyArrayAllocationSize();
-  HConstant* capacity = new(zone()) HConstant(initial_capacity());
-  AddInstruction(capacity);
+  HConstant* capacity = builder()->Add<HConstant>(initial_capacity());
   return AllocateArray(size_in_bytes,
                        capacity,
                        builder()->graph()->GetConstant0(),
@@ -1879,13 +1846,12 @@
 
   // Allocate (dealing with failure appropriately)
   HAllocate::Flags flags = HAllocate::DefaultFlags(kind_);
-  HAllocate* new_object = new(zone()) HAllocate(context, size_in_bytes,
-                                                HType::JSArray(), flags);
-  AddInstruction(new_object);
+  HAllocate* new_object = builder()->Add<HAllocate>(context, size_in_bytes,
+                                                    HType::JSArray(), flags);
 
   // Fill in the fields: map, properties, length
   HValue* map;
-  if (constructor_function_ != NULL) {
+  if (allocation_site_payload_ == NULL) {
     map = EmitInternalMapCode();
   } else {
     map = EmitMapCode(context);
@@ -1912,10 +1878,7 @@
                                           HObjectAccess access,
                                           HValue *val,
                                           Representation representation) {
-  HStoreNamedField *instr = new(zone())
-      HStoreNamedField(object, access, val, representation);
-  AddInstruction(instr);
-  return instr;
+  return Add<HStoreNamedField>(object, access, val, representation);
 }
 
 
@@ -1923,20 +1886,14 @@
                                         HObjectAccess access,
                                         HValue *typecheck,
                                         Representation representation) {
-  HLoadNamedField *instr =
-      new(zone()) HLoadNamedField(object, access, typecheck, representation);
-  AddInstruction(instr);
-  return instr;
+  return Add<HLoadNamedField>(object, access, typecheck, representation);
 }
 
 
 HStoreNamedField* HGraphBuilder::AddStoreMapConstant(HValue *object,
                                                      Handle<Map> map) {
-  HValue* constant = AddInstruction(new(zone()) HConstant(map));
-  HStoreNamedField *instr =
-      new(zone()) HStoreNamedField(object, HObjectAccess::ForMap(), constant);
-  AddInstruction(instr);
-  return instr;
+  return Add<HStoreNamedField>(object, HObjectAccess::ForMap(),
+                               Add<HConstant>(map));
 }
 
 
@@ -2384,7 +2341,7 @@
 
 
 void HGraph::OrderBlocks() {
-  HPhase phase("H_Block ordering", isolate());
+  CompilationPhase phase("H_Block ordering", info());
   BitVector visited(blocks_.length(), zone());
 
   ZoneList<HBasicBlock*> reverse_result(8, zone());
@@ -2689,7 +2646,7 @@
 void HRangeAnalysis::InferControlFlowRange(HCompareIDAndBranch* test,
                                            HBasicBlock* dest) {
   ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
-  if (test->representation().IsInteger32()) {
+  if (test->representation().IsSmiOrInteger32()) {
     Token::Value op = test->token();
     if (test->SecondSuccessor() == dest) {
       op = Token::NegateCompareOp(op);
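
The predicate above widens from Integer32 to Smi-or-Integer32, so control-flow range narrowing now also fires for Smi-typed compares. Illustrative effect (assumed, not quoted from this file):

  // if (x < 10) goto dest;
  // On the edge into dest, x's range gains an upper bound of 9; when dest
  // is the other successor, the token is negated first (x >= 10), giving
  // a lower bound of 10 instead.
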
@@ -2821,145 +2778,6 @@
 }
 
 
-void HInferRepresentation::AddToWorklist(HValue* current) {
-  if (current->representation().IsTagged()) return;
-  if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return;
-  if (in_worklist_.Contains(current->id())) return;
-  worklist_.Add(current, zone());
-  in_worklist_.Add(current->id());
-}
-
-
-void HInferRepresentation::Analyze() {
-  HPhase phase("H_Infer representations", graph_);
-
-  // (1) Initialize bit vectors and count real uses. Each phi gets a
-  // bit-vector of length <number of phis>.
-  const ZoneList<HPhi*>* phi_list = graph_->phi_list();
-  int phi_count = phi_list->length();
-  ZoneList<BitVector*> connected_phis(phi_count, graph_->zone());
-  for (int i = 0; i < phi_count; ++i) {
-    phi_list->at(i)->InitRealUses(i);
-    BitVector* connected_set = new(zone()) BitVector(phi_count, graph_->zone());
-    connected_set->Add(i);
-    connected_phis.Add(connected_set, zone());
-  }
-
-  // (2) Do a fixed point iteration to find the set of connected phis.  A
-  // phi is connected to another phi if its value is used either directly or
-  // indirectly through a transitive closure of the def-use relation.
-  bool change = true;
-  while (change) {
-    change = false;
-    // We normally have far more "forward edges" than "backward edges",
-    // so we terminate faster when we walk backwards.
-    for (int i = phi_count - 1; i >= 0; --i) {
-      HPhi* phi = phi_list->at(i);
-      for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
-        HValue* use = it.value();
-        if (use->IsPhi()) {
-          int id = HPhi::cast(use)->phi_id();
-          if (connected_phis[i]->UnionIsChanged(*connected_phis[id]))
-            change = true;
-        }
-      }
-    }
-  }
-
-  // Set truncation flags for groups of connected phis. This is a conservative
-  // approximation; the flag will be properly re-computed after representations
-  // have been determined.
-  if (phi_count > 0) {
-    BitVector* done = new(zone()) BitVector(phi_count, graph_->zone());
-    for (int i = 0; i < phi_count; ++i) {
-      if (done->Contains(i)) continue;
-
-      // Check if all uses of all connected phis in this group are truncating.
-      bool all_uses_everywhere_truncating = true;
-      for (BitVector::Iterator it(connected_phis.at(i));
-           !it.Done();
-           it.Advance()) {
-        int index = it.Current();
-        all_uses_everywhere_truncating &=
-            phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToInt32);
-        done->Add(index);
-      }
-      if (all_uses_everywhere_truncating) {
-        continue;  // Great, nothing to do.
-      }
-      // Clear truncation flag of this group of connected phis.
-      for (BitVector::Iterator it(connected_phis.at(i));
-           !it.Done();
-           it.Advance()) {
-        int index = it.Current();
-        phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToInt32);
-      }
-    }
-  }
-
-  // Simplify constant phi inputs where possible.
-  // This step uses kTruncatingToInt32 flags of phis.
-  for (int i = 0; i < phi_count; ++i) {
-    phi_list->at(i)->SimplifyConstantInputs();
-  }
-
-  // Use the phi reachability information from step 2 to
-  // sum up the non-phi use counts of all connected phis.
-  for (int i = 0; i < phi_count; ++i) {
-    HPhi* phi = phi_list->at(i);
-    for (BitVector::Iterator it(connected_phis.at(i));
-         !it.Done();
-         it.Advance()) {
-      int index = it.Current();
-      HPhi* it_use = phi_list->at(index);
-      if (index != i) phi->AddNonPhiUsesFrom(it_use);  // Don't count twice.
-    }
-  }
-
-  // Initialize work list
-  for (int i = 0; i < graph_->blocks()->length(); ++i) {
-    HBasicBlock* block = graph_->blocks()->at(i);
-    const ZoneList<HPhi*>* phis = block->phis();
-    for (int j = 0; j < phis->length(); ++j) {
-      AddToWorklist(phis->at(j));
-    }
-
-    HInstruction* current = block->first();
-    while (current != NULL) {
-      AddToWorklist(current);
-      current = current->next();
-    }
-  }
-
-  // Do a fixed point iteration, trying to improve representations
-  while (!worklist_.is_empty()) {
-    HValue* current = worklist_.RemoveLast();
-    in_worklist_.Remove(current->id());
-    current->InferRepresentation(this);
-  }
-
-  // Lastly: any instruction that we don't have representation information
-  // for defaults to Tagged.
-  for (int i = 0; i < graph_->blocks()->length(); ++i) {
-    HBasicBlock* block = graph_->blocks()->at(i);
-    const ZoneList<HPhi*>* phis = block->phis();
-    for (int j = 0; j < phis->length(); ++j) {
-      HPhi* phi = phis->at(j);
-      if (phi->representation().IsNone()) {
-        phi->ChangeRepresentation(Representation::Tagged());
-      }
-    }
-    for (HInstruction* current = block->first();
-         current != NULL; current = current->next()) {
-      if (current->representation().IsNone() &&
-          current->CheckFlag(HInstruction::kFlexibleRepresentation)) {
-        current->ChangeRepresentation(Representation::Tagged());
-      }
-    }
-  }
-}
-
-
 void HGraph::MergeRemovableSimulates() {
   HPhase phase("H_Merge removable simulates", this);
   ZoneList<HSimulate*> mergelist(2, zone());
@@ -3847,7 +3665,7 @@
 
 void HOptimizedGraphBuilder::VisitArgument(Expression* expr) {
   CHECK_ALIVE(VisitForValue(expr));
-  Push(AddInstruction(new(zone()) HPushArgument(Pop())));
+  Push(Add<HPushArgument>(Pop()));
 }
 
 
@@ -3913,8 +3731,7 @@
   AddSimulate(BailoutId::Declarations());
 
   HValue* context = environment()->LookupContext();
-  AddInstruction(
-      new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
+  Add<HStackCheck>(context, HStackCheck::kFunctionEntry);
 
   VisitStatements(current_info()->function()->body());
   if (HasStackOverflow()) return false;
@@ -3942,21 +3759,6 @@
 }
 
 
-// Perform common subexpression elimination and loop-invariant code motion.
-void HGraph::GlobalValueNumbering() {
-  HPhase phase("H_Global value numbering", this);
-  HGlobalValueNumberer gvn(this, info());
-  bool removed_side_effects = gvn.Analyze();
-  // Trigger a second analysis pass to further eliminate duplicate values that
-  // could only be discovered by removing side-effect-generating instructions
-  // during the first pass.
-  if (FLAG_smi_only_arrays && removed_side_effects) {
-    removed_side_effects = gvn.Analyze();
-    ASSERT(!removed_side_effects);
-  }
-}
-
-
 bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
   *bailout_reason = SmartArrayPointer<char>();
   OrderBlocks();
@@ -3973,9 +3775,8 @@
   Verify(true);
 #endif
 
-  if (FLAG_analyze_environment_liveness) {
-    EnvironmentSlotLivenessAnalyzer esla(this);
-    esla.AnalyzeAndTrim();
+  if (FLAG_analyze_environment_liveness && maximum_environment_size() != 0) {
+    Run<HEnvironmentLivenessAnalysisPhase>();
   }
 
   PropagateDeoptimizingMark();
@@ -4005,8 +3806,7 @@
     }
   }
 
-  HInferRepresentation rep(this);
-  rep.Analyze();
+  Run<HInferRepresentationPhase>();
 
   // Remove HSimulate instructions that have turned out not to be needed
   // after all by folding them into the following HSimulate.
@@ -4025,7 +3825,7 @@
 
   if (FLAG_use_canonicalizing) Canonicalize();
 
-  if (FLAG_use_gvn) GlobalValueNumbering();
+  if (FLAG_use_gvn) Run<HGlobalValueNumberingPhase>();
 
   if (FLAG_use_range) {
     HRangeAnalysis rangeAnalysis(this);
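
The deleted analyses (environment liveness, representation inference, and global value numbering above) now live in dedicated phase classes dispatched through Run<>. A plausible sketch of the mechanism, assuming the phases follow the existing HPhase convention; the exact declarations are not part of this patch:

  template<typename Phase>
  void HGraph::Run() {
    Phase phase(this);
    phase.Run();
  }

  class HInferRepresentationPhase : public HPhase {
   public:
    explicit HInferRepresentationPhase(HGraph* graph);
    void Run();  // body corresponds to the removed Analyze() above
  };
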
@@ -4647,12 +4447,6 @@
 }
 
 
-void HOptimizedGraphBuilder::AddPhi(HPhi* instr) {
-  ASSERT(current_block() != NULL);
-  current_block()->AddPhi(instr);
-}
-
-
 void HOptimizedGraphBuilder::PushAndAdd(HInstruction* instr) {
   Push(instr);
   AddInstruction(instr);
@@ -4660,9 +4454,11 @@
 
 
 void HOptimizedGraphBuilder::AddSoftDeoptimize() {
+  isolate()->counters()->soft_deopts_requested()->Increment();
   if (FLAG_always_opt) return;
   if (current_block()->IsDeoptimizing()) return;
-  AddInstruction(new(zone()) HSoftDeoptimize());
+  Add<HSoftDeoptimize>();
+  isolate()->counters()->soft_deopts_inserted()->Increment();
   current_block()->MarkAsDeoptimizing();
   graph()->set_has_soft_deoptimize(true);
 }
@@ -4677,16 +4473,15 @@
   }
 
   while (!arguments.is_empty()) {
-    AddInstruction(new(zone()) HPushArgument(arguments.RemoveLast()));
+    Add<HPushArgument>(arguments.RemoveLast());
   }
   return call;
 }
 
 
 void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
-  HConstant* undefined_constant = new(zone()) HConstant(
+  HConstant* undefined_constant = Add<HConstant>(
       isolate()->factory()->undefined_value());
-  AddInstruction(undefined_constant);
   graph()->set_undefined_constant(undefined_constant);
 
   // Create an arguments object containing the initial parameters.  Set the
@@ -4695,7 +4490,7 @@
   HArgumentsObject* arguments_object =
       new(zone()) HArgumentsObject(environment()->parameter_count(), zone());
   for (int i = 0; i < environment()->parameter_count(); ++i) {
-    HInstruction* parameter = AddInstruction(new(zone()) HParameter(i));
+    HInstruction* parameter = Add<HParameter>(i);
     arguments_object->AddArgument(parameter, zone());
     environment()->Bind(i, parameter);
   }
@@ -4703,7 +4498,7 @@
   graph()->SetArgumentsObject(arguments_object);
 
   // First special is HContext.
-  HInstruction* context = AddInstruction(new(zone()) HContext);
+  HInstruction* context = Add<HContext>();
   environment()->BindContext(context);
 
   // Initialize specials and locals to undefined.
@@ -5015,7 +4810,7 @@
     HControlInstruction* compare;
 
     if (stmt->switch_type() == SwitchStatement::SMI_SWITCH) {
-      if (!clause->compare_type()->Is(Type::Integer31())) {
+      if (!clause->compare_type()->Is(Type::Smi())) {
         AddSoftDeoptimize();
       }
 
@@ -5142,8 +4937,7 @@
       new(zone()) ZoneList<HUnknownOSRValue*>(length, zone());
 
   for (int i = 0; i < first_expression_index; ++i) {
-    HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
-    AddInstruction(osr_value);
+    HUnknownOSRValue* osr_value = Add<HUnknownOSRValue>();
     environment()->Bind(i, osr_value);
     osr_values->Add(osr_value, zone());
   }
@@ -5151,8 +4945,7 @@
   if (first_expression_index != length) {
     environment()->Drop(length - first_expression_index);
     for (int i = first_expression_index; i < length; ++i) {
-      HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
-      AddInstruction(osr_value);
+      HUnknownOSRValue* osr_value = Add<HUnknownOSRValue>();
       environment()->Push(osr_value);
       osr_values->Add(osr_value, zone());
     }
@@ -5161,9 +4954,8 @@
   graph()->set_osr_values(osr_values);
 
   AddSimulate(osr_entry_id);
-  AddInstruction(new(zone()) HOsrEntry(osr_entry_id));
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  Add<HOsrEntry>(osr_entry_id);
+  HContext* context = Add<HContext>();
   environment()->BindContext(context);
   current_block()->Goto(loop_predecessor);
   loop_predecessor->SetJoinId(statement->EntryId());
@@ -5178,9 +4970,8 @@
   BreakAndContinueScope push(break_info, this);
   AddSimulate(stmt->StackCheckId());
   HValue* context = environment()->LookupContext();
-  HStackCheck* stack_check =
-    new(zone()) HStackCheck(context, HStackCheck::kBackwardsBranch);
-  AddInstruction(stack_check);
+  HStackCheck* stack_check = Add<HStackCheck>(
+      context, HStackCheck::kBackwardsBranch);
   ASSERT(loop_entry->IsLoopHeader());
   loop_entry->loop_information()->set_stack_check(stack_check);
   CHECK_BAILOUT(Visit(stmt->body()));
@@ -5349,30 +5140,24 @@
   CHECK_ALIVE(VisitForValue(stmt->enumerable()));
   HValue* enumerable = Top();  // Leave enumerable at the top.
 
-  HInstruction* map = AddInstruction(new(zone()) HForInPrepareMap(
-      environment()->LookupContext(), enumerable));
+  HInstruction* map = Add<HForInPrepareMap>(
+      environment()->LookupContext(), enumerable);
   AddSimulate(stmt->PrepareId());
 
-  HInstruction* array = AddInstruction(
-      new(zone()) HForInCacheArray(
-          enumerable,
-          map,
-          DescriptorArray::kEnumCacheBridgeCacheIndex));
+  HInstruction* array = Add<HForInCacheArray>(
+      enumerable, map, DescriptorArray::kEnumCacheBridgeCacheIndex);
 
-  HInstruction* enum_length = AddInstruction(new(zone()) HMapEnumLength(map));
+  HInstruction* enum_length = Add<HMapEnumLength>(map);
 
-  HInstruction* start_index = AddInstruction(new(zone()) HConstant(0));
+  HInstruction* start_index = Add<HConstant>(0);
 
   Push(map);
   Push(array);
   Push(enum_length);
   Push(start_index);
 
-  HInstruction* index_cache = AddInstruction(
-      new(zone()) HForInCacheArray(
-          enumerable,
-          map,
-          DescriptorArray::kEnumCacheBridgeIndicesCacheIndex));
+  HInstruction* index_cache = Add<HForInCacheArray>(
+      enumerable, map, DescriptorArray::kEnumCacheBridgeIndicesCacheIndex);
   HForInCacheArray::cast(array)->set_index_cache(
       HForInCacheArray::cast(index_cache));
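
For reference, the four Push calls above establish the expression-stack layout that the loop-body hunks below index into:

  ExpressionStackAt(0)  iteration index
  ExpressionStackAt(1)  enum-cache length
  ExpressionStackAt(2)  enum-cache array (the key cache)
  ExpressionStackAt(3)  map of the enumerable
  ExpressionStackAt(4)  the enumerable itself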
 
@@ -5403,18 +5188,16 @@
 
   set_current_block(loop_body);
 
-  HValue* key = AddInstruction(
-      new(zone()) HLoadKeyed(
-          environment()->ExpressionStackAt(2),  // Enum cache.
-          environment()->ExpressionStackAt(0),  // Iteration index.
-          environment()->ExpressionStackAt(0),
-          FAST_ELEMENTS));
+  HValue* key = Add<HLoadKeyed>(
+      environment()->ExpressionStackAt(2),  // Enum cache.
+      environment()->ExpressionStackAt(0),  // Iteration index.
+      environment()->ExpressionStackAt(0),
+      FAST_ELEMENTS);
 
   // Check if the expected map still matches that of the enumerable.
   // If not, just deoptimize.
-  AddInstruction(new(zone()) HCheckMapValue(
-      environment()->ExpressionStackAt(4),
-      environment()->ExpressionStackAt(3)));
+  Add<HCheckMapValue>(environment()->ExpressionStackAt(4),
+                      environment()->ExpressionStackAt(3));
 
   Bind(each_var, key);
 
@@ -5587,9 +5370,7 @@
   HValue* context = environment()->LookupContext();
   int length = current_info()->scope()->ContextChainLength(var->scope());
   while (length-- > 0) {
-    HInstruction* context_instruction = new(zone()) HOuterContext(context);
-    AddInstruction(context_instruction);
-    context = context_instruction;
+    context = Add<HOuterContext>(context);
   }
   return context;
 }
@@ -5671,7 +5452,7 @@
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  HConstant* instr = new(zone()) HConstant(expr->handle());
+  HConstant* instr = new(zone()) HConstant(expr->value());
   return ast_context()->ReturnInstruction(instr, expr->id());
 }
 
@@ -5895,23 +5676,18 @@
     flags |= expr->has_function()
         ? ObjectLiteral::kHasFunction : ObjectLiteral::kNoFlags;
 
-    AddInstruction(new(zone()) HPushArgument(AddInstruction(
-        new(zone()) HConstant(closure_literals))));
-    AddInstruction(new(zone()) HPushArgument(AddInstruction(
-        new(zone()) HConstant(literal_index))));
-    AddInstruction(new(zone()) HPushArgument(AddInstruction(
-        new(zone()) HConstant(constant_properties))));
-    AddInstruction(new(zone()) HPushArgument(AddInstruction(
-        new(zone()) HConstant(flags))));
+    Add<HPushArgument>(Add<HConstant>(closure_literals));
+    Add<HPushArgument>(Add<HConstant>(literal_index));
+    Add<HPushArgument>(Add<HConstant>(constant_properties));
+    Add<HPushArgument>(Add<HConstant>(flags));
 
     Runtime::FunctionId function_id =
         (expr->depth() > 1 || expr->may_store_doubles())
         ? Runtime::kCreateObjectLiteral : Runtime::kCreateObjectLiteralShallow;
-    literal = AddInstruction(
-        new(zone()) HCallRuntime(context,
-                                 isolate()->factory()->empty_string(),
-                                 Runtime::FunctionForId(function_id),
-                                 4));
+    literal = Add<HCallRuntime>(context,
+                                isolate()->factory()->empty_string(),
+                                Runtime::FunctionForId(function_id),
+                                4);
   }
 
   // The object is expected in the bailout environment during computation
@@ -5932,7 +5708,7 @@
         ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
         // Fall through.
       case ObjectLiteral::Property::COMPUTED:
-        if (key->handle()->IsInternalizedString()) {
+        if (key->value()->IsInternalizedString()) {
           if (property->emit_store()) {
             CHECK_ALIVE(VisitForValue(value));
             HValue* value = Pop();
@@ -5977,8 +5753,7 @@
     // of the object. This makes sure that the original object won't
     // be used by other optimized code before it is transformed
     // (e.g. because of code motion).
-    HToFastProperties* result = new(zone()) HToFastProperties(Pop());
-    AddInstruction(result);
+    HToFastProperties* result = Add<HToFastProperties>(Pop());
     return ast_context()->ReturnValue(result);
   } else {
     return ast_context()->ReturnValue(Pop());
@@ -6050,40 +5825,16 @@
     Handle<FixedArray> constants = isolate()->factory()->empty_fixed_array();
     int literal_index = expr->literal_index();
 
-    // TODO(mstarzinger): The following check and deopt are actually obsolete,
-    // but test cases for the tick processor fail because the profile differs.
-
-    // Deopt if the array literal boilerplate ElementsKind is of a type
-    // different than the expected one. The check isn't necessary if the
-    // boilerplate has already been converted to TERMINAL_FAST_ELEMENTS_KIND.
-    if (CanTransitionToMoreGeneralFastElementsKind(
-            boilerplate_elements_kind, true)) {
-      IfBuilder builder(this);
-      HValue* boilerplate = AddInstruction(new(zone())
-          HConstant(original_boilerplate_object));
-      HValue* elements_kind = AddInstruction(new(zone())
-          HElementsKind(boilerplate));
-      HValue* expected_kind = AddInstruction(new(zone())
-          HConstant(boilerplate_elements_kind));
-      builder.IfCompare(elements_kind, expected_kind, Token::EQ);
-      builder.Then();
-      builder.ElseDeopt();
-    }
-
-    AddInstruction(new(zone()) HPushArgument(AddInstruction(
-        new(zone()) HConstant(literals))));
-    AddInstruction(new(zone()) HPushArgument(AddInstruction(
-        new(zone()) HConstant(literal_index))));
-    AddInstruction(new(zone()) HPushArgument(AddInstruction(
-        new(zone()) HConstant(constants))));
+    Add<HPushArgument>(Add<HConstant>(literals));
+    Add<HPushArgument>(Add<HConstant>(literal_index));
+    Add<HPushArgument>(Add<HConstant>(constants));
 
     Runtime::FunctionId function_id = (expr->depth() > 1)
         ? Runtime::kCreateArrayLiteral : Runtime::kCreateArrayLiteralShallow;
-    literal = AddInstruction(
-        new(zone()) HCallRuntime(context,
-                                 isolate()->factory()->empty_string(),
-                                 Runtime::FunctionForId(function_id),
-                                 3));
+    literal = Add<HCallRuntime>(context,
+                                isolate()->factory()->empty_string(),
+                                Runtime::FunctionForId(function_id),
+                                3);
 
     // De-opt if elements kind changed from boilerplate_elements_kind.
     Handle<Map> map = Handle<Map>(original_boilerplate_object->map(),
@@ -6095,7 +5846,7 @@
   // of the property values and is the value of the entire expression.
   Push(literal);
   // The literal index is on the stack, too.
-  Push(AddInstruction(new(zone()) HConstant(expr->literal_index())));
+  Push(Add<HConstant>(expr->literal_index()));
 
   HInstruction* elements = NULL;
 
@@ -6111,7 +5862,7 @@
 
     elements = AddLoadElements(literal);
 
-    HValue* key = AddInstruction(new(zone()) HConstant(i));
+    HValue* key = Add<HConstant>(i);
 
     switch (boilerplate_elements_kind) {
       case FAST_SMI_ELEMENTS:
@@ -6120,11 +5871,8 @@
       case FAST_HOLEY_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case FAST_HOLEY_DOUBLE_ELEMENTS:
-        AddInstruction(new(zone()) HStoreKeyed(
-            elements,
-            key,
-            value,
-            boilerplate_elements_kind));
+        Add<HStoreKeyed>(elements, key, value,
+                         boilerplate_elements_kind);
         break;
       default:
         UNREACHABLE();
@@ -6178,14 +5926,14 @@
 
 
 void HOptimizedGraphBuilder::AddCheckMap(HValue* object, Handle<Map> map) {
-  BuildCheckNonSmi(object);
+  BuildCheckHeapObject(object);
   AddInstruction(HCheckMaps::New(object, map, zone()));
 }
 
 
 void HOptimizedGraphBuilder::AddCheckMapsWithTransitions(HValue* object,
                                                          Handle<Map> map) {
-  BuildCheckNonSmi(object);
+  BuildCheckHeapObject(object);
   AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
 }
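
BuildCheckNonSmi becomes BuildCheckHeapObject throughout this file; the guard itself is unchanged (a tagged value that is not a Smi is by definition a heap object), the new name simply states what AddCheckMap needs before it can inspect a map. Assumed shape of the renamed helper (the instruction name is an assumption; the definition is not in this hunk):

  void HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
    if (!obj->type().IsHeapObject()) {
      Add<HCheckHeapObject>(obj);  // deoptimizes if obj turns out to be a Smi
    }
  }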
 
@@ -6221,11 +5969,9 @@
       ASSERT(proto->GetPrototype(isolate())->IsNull());
     }
     ASSERT(proto->IsJSObject());
-    AddInstruction(new(zone()) HCheckPrototypeMaps(
-        Handle<JSObject>(JSObject::cast(map->prototype())),
-        Handle<JSObject>(JSObject::cast(proto)),
-        zone(),
-        top_info()));
+    Add<HCheckPrototypeMaps>(Handle<JSObject>(JSObject::cast(map->prototype())),
+                             Handle<JSObject>(JSObject::cast(proto)),
+                             zone(), top_info());
   }
 
   HObjectAccess field_access = HObjectAccess::ForField(map, lookup, name);
@@ -6237,11 +5983,10 @@
     if (transition_to_field) {
       // The store requires a mutable HeapNumber to be allocated.
       NoObservableSideEffectsScope no_side_effects(this);
-      HInstruction* heap_number_size = AddInstruction(new(zone()) HConstant(
-          HeapNumber::kSize));
-      HInstruction* double_box = AddInstruction(new(zone()) HAllocate(
+      HInstruction* heap_number_size = Add<HConstant>(HeapNumber::kSize);
+      HInstruction* double_box = Add<HAllocate>(
           environment()->LookupContext(), heap_number_size,
-          HType::HeapNumber(), HAllocate::CAN_ALLOCATE_IN_NEW_SPACE));
+          HType::HeapNumber(), HAllocate::CAN_ALLOCATE_IN_NEW_SPACE);
       AddStoreMapConstant(double_box, isolate()->factory()->heap_number_map());
       AddStore(double_box, HObjectAccess::ForHeapNumberValue(),
           value, Representation::Double());
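
Since a tagged field slot cannot hold a raw double, transitioning a field to Double representation boxes the value: a HeapNumber-sized cell is allocated, stamped with the heap-number map, and the double is written into it; the box itself then becomes the field's value (stored by the code following this hunk). In effect:

  // obj.f = 1.5 with f tracked as Representation::Double()
  //   box       = Allocate(HeapNumber::kSize)   // double_box above
  //   box.map   = heap_number_map               // AddStoreMapConstant
  //   box.value = 1.5                           // ForHeapNumberValue store
  //   obj.f     = box                           // store in the omitted tail
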
@@ -6291,8 +6036,8 @@
     Handle<JSFunction> setter,
     Handle<JSObject> holder) {
   AddCheckConstantFunction(holder, object, map);
-  AddInstruction(new(zone()) HPushArgument(object));
-  AddInstruction(new(zone()) HPushArgument(value));
+  Add<HPushArgument>(object);
+  Add<HPushArgument>(value);
   return new(zone()) HCallConstantFunction(setter, 2);
 }
 
@@ -6354,7 +6099,7 @@
   if (count != types->length()) return NULL;
 
   // Everything matched; can use monomorphic load.
-  BuildCheckNonSmi(object);
+  BuildCheckHeapObject(object);
   AddInstruction(HCheckMaps::New(object, types, zone()));
   return BuildLoadNamedField(object, access, representation);
 }
@@ -6369,7 +6114,7 @@
       expr, object, types, name);
   if (instr == NULL) {
     // Something did not match; must use a polymorphic load.
-    BuildCheckNonSmi(object);
+    BuildCheckHeapObject(object);
     HValue* context = environment()->LookupContext();
     instr = new(zone()) HLoadNamedFieldPolymorphic(
         context, object, types, name, zone());
@@ -6425,7 +6170,7 @@
   if (count != types->length()) return false;
 
   // Everything matched; can use monomorphic store.
-  BuildCheckNonSmi(object);
+  BuildCheckHeapObject(object);
   AddInstruction(HCheckMaps::New(object, types, zone()));
   HInstruction* store;
   CHECK_ALIVE_OR_RETURN(
@@ -6464,7 +6209,7 @@
     LookupResult lookup(isolate());
     if (ComputeLoadStoreField(map, name, &lookup, true)) {
       if (count == 0) {
-        BuildCheckNonSmi(object);
+        BuildCheckHeapObject(object);
         join = graph()->CreateBasicBlock();
       }
       ++count;
@@ -6536,6 +6281,7 @@
     HValue* value = environment()->ExpressionStackAt(0);
     HValue* object = environment()->ExpressionStackAt(1);
 
+    if (expr->IsUninitialized()) AddSoftDeoptimize();
     return BuildStoreNamed(expr, expr->id(), expr->position(),
                            expr->AssignmentId(), prop, object, value);
   } else {
@@ -6571,25 +6317,19 @@
   if (type == kUseCell) {
     Handle<GlobalObject> global(current_info()->global_object());
     Handle<PropertyCell> cell(global->GetPropertyCell(&lookup));
-    HInstruction* instr =
-        new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
+    HInstruction* instr = Add<HStoreGlobalCell>(value, cell,
+                                                lookup.GetPropertyDetails());
     instr->set_position(position);
-    AddInstruction(instr);
     if (instr->HasObservableSideEffects()) {
       AddSimulate(ast_id, REMOVABLE_SIMULATE);
     }
   } else {
     HValue* context = environment()->LookupContext();
-    HGlobalObject* global_object = new(zone()) HGlobalObject(context);
-    AddInstruction(global_object);
+    HGlobalObject* global_object = Add<HGlobalObject>(context);
     HStoreGlobalGeneric* instr =
-        new(zone()) HStoreGlobalGeneric(context,
-                                        global_object,
-                                        var->name(),
-                                        value,
-                                        function_strict_mode_flag());
+        Add<HStoreGlobalGeneric>(context, global_object, var->name(),
+                                 value, function_strict_mode_flag());
     instr->set_position(position);
-    AddInstruction(instr);
     ASSERT(instr->HasObservableSideEffects());
     AddSimulate(ast_id, REMOVABLE_SIMULATE);
   }
@@ -6604,7 +6344,7 @@
                                              HValue* object,
                                              HValue* value) {
   Literal* key = prop->key()->AsLiteral();
-  Handle<String> name = Handle<String>::cast(key->handle());
+  Handle<String> name = Handle<String>::cast(key->value());
   ASSERT(!name.is_null());
 
   HInstruction* instr = NULL;
@@ -6625,8 +6365,8 @@
         return;
       }
       Drop(2);
-      AddInstruction(new(zone()) HPushArgument(object));
-      AddInstruction(new(zone()) HPushArgument(value));
+      Add<HPushArgument>(object);
+      Add<HPushArgument>(value);
       instr = new(zone()) HCallConstantFunction(setter, 2);
     } else {
       Drop(2);
@@ -6723,9 +6463,8 @@
         }
 
         HValue* context = BuildContextChainWalk(var);
-        HStoreContextSlot* instr =
-            new(zone()) HStoreContextSlot(context, var->index(), mode, Top());
-        AddInstruction(instr);
+        HStoreContextSlot* instr = Add<HStoreContextSlot>(context, var->index(),
+                                                          mode, Top());
         if (instr->HasObservableSideEffects()) {
           AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
         }
@@ -6855,7 +6594,7 @@
         // We insert a use of the old value to detect unsupported uses of const
         // variables (e.g. initialization inside a loop).
         HValue* old_value = environment()->Lookup(var);
-        AddInstruction(new(zone()) HUseConst(old_value));
+        Add<HUseConst>(old_value);
       }
     } else if (var->mode() == CONST_HARMONY) {
       if (expr->op() != Token::INIT_CONST_HARMONY) {
@@ -6936,9 +6675,8 @@
         }
 
         HValue* context = BuildContextChainWalk(var);
-        HStoreContextSlot* instr = new(zone()) HStoreContextSlot(
-            context, var->index(), mode, Top());
-        AddInstruction(instr);
+        HStoreContextSlot* instr = Add<HStoreContextSlot>(context, var->index(),
+                                                          mode, Top());
         if (instr->HasObservableSideEffects()) {
           AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
         }
@@ -6972,9 +6710,8 @@
 
   HValue* context = environment()->LookupContext();
   HValue* value = environment()->Pop();
-  HThrow* instr = new(zone()) HThrow(context, value);
+  HThrow* instr = Add<HThrow>(context, value);
   instr->set_position(expr->position());
-  AddInstruction(instr);
   AddSimulate(expr->id());
   current_block()->FinishExit(new(zone()) HAbnormalExit);
   set_current_block(NULL);
@@ -7008,8 +6745,6 @@
     Property* expr) {
   if (expr->IsUninitialized()) {
     AddSoftDeoptimize();
-  } else {
-    // OS::DebugBreak();
   }
   HValue* context = environment()->LookupContext();
   return new(zone()) HLoadNamedGeneric(context, object, name);
@@ -7022,7 +6757,7 @@
     Handle<JSFunction> getter,
     Handle<JSObject> holder) {
   AddCheckConstantFunction(holder, object, map);
-  AddInstruction(new(zone()) HPushArgument(object));
+  Add<HPushArgument>(object);
   return new(zone()) HCallConstantFunction(getter, 1);
 }
 
@@ -7067,9 +6802,8 @@
     Handle<JSObject> holder(lookup.holder());
     Handle<Map> holder_map(holder->map());
     AddCheckMap(object, map);
-    AddInstruction(new(zone()) HCheckPrototypeMaps(
-        prototype, holder, zone(), top_info()));
-    HValue* holder_value = AddInstruction(new(zone()) HConstant(holder));
+    Add<HCheckPrototypeMaps>(prototype, holder, zone(), top_info());
+    HValue* holder_value = Add<HConstant>(holder);
     return BuildLoadNamedField(holder_value,
         HObjectAccess::ForField(holder_map, &lookup, name),
         ComputeLoadStoreRepresentation(map, &lookup));
@@ -7081,8 +6815,7 @@
     Handle<JSObject> holder(lookup.holder());
     Handle<Map> holder_map(holder->map());
     AddCheckMap(object, map);
-    AddInstruction(new(zone()) HCheckPrototypeMaps(
-        prototype, holder, zone(), top_info()));
+    Add<HCheckPrototypeMaps>(prototype, holder, zone(), top_info());
     Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*holder_map));
     return new(zone()) HConstant(function);
   }
@@ -7119,8 +6852,7 @@
       isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
     Handle<JSObject> prototype(JSObject::cast(map->prototype()), isolate());
     Handle<JSObject> object_prototype = isolate()->initial_object_prototype();
-    AddInstruction(new(zone()) HCheckPrototypeMaps(
-        prototype, object_prototype, zone(), top_info()));
+    Add<HCheckPrototypeMaps>(prototype, object_prototype, zone(), top_info());
     load_mode = ALLOW_RETURN_HOLE;
     graph()->MarkDependsOnEmptyArrayProtoElements();
   }
@@ -7199,7 +6931,7 @@
     KeyedAccessStoreMode store_mode,
     bool* has_side_effects) {
   *has_side_effects = false;
-  BuildCheckNonSmi(object);
+  BuildCheckHeapObject(object);
   SmallMapList* maps = prop->GetReceiverTypes();
   bool todo_external_array = false;
 
@@ -7252,9 +6984,8 @@
           map->elements_kind(),
           transition_target.at(i)->elements_kind()));
       HValue* context = environment()->LookupContext();
-      transition = new(zone()) HTransitionElementsKind(
-          context, object, map, transition_target.at(i));
-      AddInstruction(transition);
+      transition = Add<HTransitionElementsKind>(context, object, map,
+                                                transition_target.at(i));
     } else {
       type_todo[map->elements_kind()] = true;
       if (IsExternalArrayElementsKind(map->elements_kind())) {
@@ -7287,8 +7018,7 @@
       AddInstruction(HCheckInstanceType::NewIsSpecObject(object, zone()));
   HBasicBlock* join = graph()->CreateBasicBlock();
 
-  HInstruction* elements_kind_instr =
-      AddInstruction(new(zone()) HElementsKind(object));
+  HInstruction* elements_kind_instr = Add<HElementsKind>(object);
   HInstruction* elements = AddLoadElements(object, checkspec);
   HLoadExternalArrayPointer* external_elements = NULL;
   HInstruction* checked_key = NULL;
@@ -7309,11 +7039,9 @@
                   LAST_ELEMENTS_KIND);
     if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
         && todo_external_array) {
-      HInstruction* length =
-          AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
-      checked_key = AddBoundsCheck(key, length);
-      external_elements = new(zone()) HLoadExternalArrayPointer(elements);
-      AddInstruction(external_elements);
+      HInstruction* length = Add<HFixedArrayBaseLength>(elements);
+      checked_key = Add<HBoundsCheck>(key, length);
+      external_elements = Add<HLoadExternalArrayPointer>(elements);
     }
     if (type_todo[elements_kind]) {
       HBasicBlock* if_true = graph()->CreateBasicBlock();
@@ -7354,7 +7082,7 @@
             typecheck, Representation::Smi());
         length->set_type(HType::Smi());
 
-        checked_key = AddBoundsCheck(key, length);
+        checked_key = Add<HBoundsCheck>(key, length);
         access = AddInstruction(BuildFastElementAccess(
             elements, checked_key, val, elements_kind_branch,
             elements_kind, is_store, NEVER_RETURN_HOLE, STANDARD_STORE));
@@ -7372,7 +7100,7 @@
 
         set_current_block(if_fastobject);
         length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
-        checked_key = AddBoundsCheck(key, length);
+        checked_key = Add<HBoundsCheck>(key, length);
         access = AddInstruction(BuildFastElementAccess(
             elements, checked_key, val, elements_kind_branch,
             elements_kind, is_store, NEVER_RETURN_HOLE, STANDARD_STORE));
@@ -7424,7 +7152,7 @@
                        : BuildLoadKeyedGeneric(obj, key);
       AddInstruction(instr);
     } else {
-      BuildCheckNonSmi(obj);
+      BuildCheckHeapObject(obj);
       instr = BuildMonomorphicElementAccess(
           obj, key, val, NULL, map, is_store, expr->GetStoreMode());
     }
@@ -7504,8 +7232,7 @@
     if (!name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("length"))) return false;
 
     if (function_state()->outer() == NULL) {
-      HInstruction* elements = AddInstruction(
-          new(zone()) HArgumentsElements(false));
+      HInstruction* elements = Add<HArgumentsElements>(false);
       result = new(zone()) HArgumentsLength(elements);
     } else {
       // Number of arguments without receiver.
@@ -7520,11 +7247,9 @@
     HValue* key = Pop();
     Drop(1);  // Arguments object.
     if (function_state()->outer() == NULL) {
-      HInstruction* elements = AddInstruction(
-          new(zone()) HArgumentsElements(false));
-      HInstruction* length = AddInstruction(
-          new(zone()) HArgumentsLength(elements));
-      HInstruction* checked_key = AddBoundsCheck(key, length);
+      HInstruction* elements = Add<HArgumentsElements>(false);
+      HInstruction* length = Add<HArgumentsLength>(elements);
+      HInstruction* checked_key = Add<HBoundsCheck>(key, length);
       result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
     } else {
       EnsureArgumentsArePushedForAccess();
@@ -7533,9 +7258,8 @@
       HInstruction* elements = function_state()->arguments_elements();
       int argument_count = environment()->
           arguments_environment()->parameter_count() - 1;
-      HInstruction* length = AddInstruction(new(zone()) HConstant(
-          argument_count));
-      HInstruction* checked_key = AddBoundsCheck(key, length);
+      HInstruction* length = Add<HConstant>(argument_count);
+      HInstruction* checked_key = Add<HBoundsCheck>(key, length);
       result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
     }
   }
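
Both arms avoid materializing an arguments object: at the top level the elements and length come straight from the frame (HArgumentsElements / HArgumentsLength), while inside an inlined function the statically known parameter count serves as the length; either way the key is bounds-checked before HAccessArgumentsAt reads the slot. For example (illustrative):

  // function f() { return arguments[1]; }
  // f(10, 20, 30)  -->  bounds check 1 < length, then read slot 1;
  //                     no arguments object is ever allocated
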
@@ -7556,7 +7280,7 @@
   HInstruction* instr = NULL;
   if (expr->IsStringLength()) {
     HValue* string = Pop();
-    BuildCheckNonSmi(string);
+    BuildCheckHeapObject(string);
     AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
     instr = HStringLength::New(zone(), string);
   } else if (expr->IsStringAccess()) {
@@ -7571,7 +7295,7 @@
 
   } else if (expr->IsFunctionPrototype()) {
     HValue* function = Pop();
-    BuildCheckNonSmi(function);
+    BuildCheckHeapObject(function);
     instr = new(zone()) HLoadFunctionPrototype(function);
 
   } else if (expr->key()->IsPropertyName()) {
@@ -7594,7 +7318,7 @@
       if (LookupGetter(map, name, &getter, &holder)) {
         AddCheckConstantFunction(holder, Top(), map);
         if (FLAG_inline_accessors && TryInlineGetter(getter, expr)) return;
-        AddInstruction(new(zone()) HPushArgument(Pop()));
+        Add<HPushArgument>(Pop());
         instr = new(zone()) HCallConstantFunction(getter, 1);
       } else {
         instr = BuildLoadNamedMonomorphic(Pop(), name, expr, map);
@@ -7636,8 +7360,7 @@
                                                    Handle<Map> receiver_map) {
   if (!holder.is_null()) {
     Handle<JSObject> prototype(JSObject::cast(receiver_map->prototype()));
-    AddInstruction(new(zone()) HCheckPrototypeMaps(
-        prototype, holder, zone(), top_info()));
+    Add<HCheckPrototypeMaps>(prototype, holder, zone(), top_info());
   }
 }
 
@@ -7745,7 +7468,7 @@
         empty_smi_block->Goto(number_block);
         set_current_block(not_smi_block);
       } else {
-        BuildCheckNonSmi(receiver);
+        BuildCheckHeapObject(receiver);
       }
     }
     HBasicBlock* if_true = graph()->CreateBasicBlock();
@@ -8064,9 +7787,7 @@
   //
   // TODO(kmillikin): implement the same inlining on other platforms so we
   // can remove the unsightly ifdefs in this function.
-  HConstant* context =
-      new(zone()) HConstant(Handle<Context>(target->context()));
-  AddInstruction(context);
+  HConstant* context = Add<HConstant>(Handle<Context>(target->context()));
   inner_env->BindContext(context);
 #endif
 
@@ -8080,25 +7801,19 @@
     ASSERT(function->scope()->arguments()->IsStackAllocated());
     HEnvironment* arguments_env = inner_env->arguments_environment();
     int arguments_count = arguments_env->parameter_count();
-    arguments_object = new(zone()) HArgumentsObject(arguments_count, zone());
+    arguments_object = Add<HArgumentsObject>(arguments_count, zone());
     inner_env->Bind(function->scope()->arguments(), arguments_object);
     for (int i = 0; i < arguments_count; i++) {
       arguments_object->AddArgument(arguments_env->Lookup(i), zone());
     }
-    AddInstruction(arguments_object);
   }
 
   HEnterInlined* enter_inlined =
-      new(zone()) HEnterInlined(target,
-                                arguments_count,
-                                function,
-                                function_state()->inlining_kind(),
-                                function->scope()->arguments(),
-                                arguments_object,
-                                undefined_receiver,
-                                zone());
+      Add<HEnterInlined>(target, arguments_count, function,
+                         function_state()->inlining_kind(),
+                         function->scope()->arguments(),
+                         arguments_object, undefined_receiver, zone());
   function_state()->set_entry(enter_inlined);
-  AddInstruction(enter_inlined);
 
   VisitDeclarations(target_info.scope()->declarations());
   VisitStatements(function->body());
@@ -8336,12 +8051,9 @@
         HValue* string = Pop();
         HValue* context = environment()->LookupContext();
         ASSERT(!expr->holder().is_null());
-        AddInstruction(new(zone()) HCheckPrototypeMaps(
-            Call::GetPrototypeForPrimitiveCheck(STRING_CHECK,
-                expr->holder()->GetIsolate()),
-            expr->holder(),
-            zone(),
-            top_info()));
+        Add<HCheckPrototypeMaps>(Call::GetPrototypeForPrimitiveCheck(
+                STRING_CHECK, expr->holder()->GetIsolate()),
+            expr->holder(), zone(), top_info());
         HInstruction* char_code =
             BuildStringCharCodeAt(context, string, index);
         if (id == kStringCharCodeAt) {
@@ -8432,8 +8144,7 @@
         AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
         Drop(1);  // Receiver.
         HValue* context = environment()->LookupContext();
-        HGlobalObject* global_object = new(zone()) HGlobalObject(context);
-        AddInstruction(global_object);
+        HGlobalObject* global_object = Add<HGlobalObject>(context);
         HRandom* result = new(zone()) HRandom(global_object);
         ast_context()->ReturnInstruction(result, expr->id());
         return true;
@@ -8512,12 +8223,9 @@
   HValue* receiver = Pop();
 
   if (function_state()->outer() == NULL) {
-    HInstruction* elements = AddInstruction(
-        new(zone()) HArgumentsElements(false));
-    HInstruction* length =
-        AddInstruction(new(zone()) HArgumentsLength(elements));
-    HValue* wrapped_receiver =
-        AddInstruction(new(zone()) HWrapReceiver(receiver, function));
+    HInstruction* elements = Add<HArgumentsElements>(false);
+    HInstruction* length = Add<HArgumentsLength>(elements);
+    HValue* wrapped_receiver = Add<HWrapReceiver>(receiver, function);
     HInstruction* result =
         new(zone()) HApplyArguments(function,
                                     wrapped_receiver,
@@ -8742,14 +8450,12 @@
 
         CHECK_ALIVE(VisitForValue(expr->expression()));
         HValue* function = Pop();
-        AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
+        Add<HCheckFunction>(function, expr->target());
 
         // Replace the global object with the global receiver.
-        HGlobalReceiver* global_receiver =
-            new(zone()) HGlobalReceiver(global_object);
+        HGlobalReceiver* global_receiver = Add<HGlobalReceiver>(global_object);
         // Index of the receiver from the top of the expression stack.
         const int receiver_index = argument_count - 1;
-        AddInstruction(global_receiver);
         ASSERT(environment()->ExpressionStackAt(receiver_index)->
                IsGlobalObject());
         environment()->SetExpressionStackAt(receiver_index, global_receiver);
@@ -8768,12 +8474,19 @@
           graph()->MarkRecursive();
         }
 
-        call = PreProcessCall(new(zone()) HCallKnownGlobal(expr->target(),
-                                                           argument_count));
+        if (CallStubCompiler::HasCustomCallGenerator(expr->target())) {
+          // When the target has a custom call IC generator, use the IC,
+          // because it is likely to generate better code.
+          HValue* context = environment()->LookupContext();
+          call = PreProcessCall(
+              new(zone()) HCallNamed(context, var->name(), argument_count));
+        } else {
+          call = PreProcessCall(new(zone()) HCallKnownGlobal(expr->target(),
+                                                             argument_count));
+        }
       } else {
         HValue* context = environment()->LookupContext();
-        HGlobalObject* receiver = new(zone()) HGlobalObject(context);
-        AddInstruction(receiver);
+        HGlobalObject* receiver = Add<HGlobalObject>(context);
         PushAndAdd(new(zone()) HPushArgument(receiver));
         CHECK_ALIVE(VisitArgumentList(expr->arguments()));
 
@@ -8787,12 +8500,11 @@
       CHECK_ALIVE(VisitForValue(expr->expression()));
       HValue* function = Top();
       HValue* context = environment()->LookupContext();
-      HGlobalObject* global = new(zone()) HGlobalObject(context);
-      AddInstruction(global);
+      HGlobalObject* global = Add<HGlobalObject>(context);
       HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global);
       PushAndAdd(receiver);
       CHECK_ALIVE(VisitExpressions(expr->arguments()));
-      AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
+      Add<HCheckFunction>(function, expr->target());
 
       if (TryInlineBuiltinFunctionCall(expr, true)) {  // Drop the function.
         if (FLAG_trace_inlining) {
@@ -8818,10 +8530,8 @@
       CHECK_ALIVE(VisitForValue(expr->expression()));
       HValue* function = Top();
       HValue* context = environment()->LookupContext();
-      HGlobalObject* global_object = new(zone()) HGlobalObject(context);
-      AddInstruction(global_object);
-      HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global_object);
-      AddInstruction(receiver);
+      HGlobalObject* global_object = Add<HGlobalObject>(context);
+      HGlobalReceiver* receiver = Add<HGlobalReceiver>(global_object);
       PushAndAdd(new(zone()) HPushArgument(receiver));
       CHECK_ALIVE(VisitArgumentList(expr->arguments()));
 
@@ -8839,8 +8549,7 @@
 static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
   return constructor->has_initial_map() &&
       constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
-      constructor->initial_map()->instance_size() < HAllocate::kMaxInlineSize &&
-      constructor->initial_map()->InitialPropertiesLength() == 0;
+      constructor->initial_map()->instance_size() < HAllocateObject::kMaxSize;
 }
 
 
@@ -8850,7 +8559,6 @@
   ASSERT(current_block()->HasPredecessor());
   int argument_count = expr->arguments()->length() + 1;  // Plus constructor.
   HValue* context = environment()->LookupContext();
-  Factory* factory = isolate()->factory();
 
   if (FLAG_inline_construct &&
       expr->IsMonomorphic() &&
@@ -8861,8 +8569,7 @@
     HValue* function = Top();
     CHECK_ALIVE(VisitExpressions(expr->arguments()));
     Handle<JSFunction> constructor = expr->target();
-    HValue* check = AddInstruction(
-        new(zone()) HCheckFunction(function, constructor));
+    HValue* check = Add<HCheckFunction>(function, constructor);
 
     // Force completion of inobject slack tracking before generating
     // allocation code to finalize instance size.
@@ -8870,81 +8577,19 @@
       constructor->shared()->CompleteInobjectSlackTracking();
     }
 
-    // Calculate instance size from initial map of constructor.
-    ASSERT(constructor->has_initial_map());
-    Handle<Map> initial_map(constructor->initial_map());
-    int instance_size = initial_map->instance_size();
-    ASSERT(initial_map->InitialPropertiesLength() == 0);
-
-    // Allocate an instance of the implicit receiver object.
-    HValue* size_in_bytes =
-        AddInstruction(new(zone()) HConstant(instance_size));
-
-    HAllocate::Flags flags = HAllocate::DefaultFlags();
-    if (FLAG_pretenuring_call_new &&
-        isolate()->heap()->ShouldGloballyPretenure()) {
-      flags = static_cast<HAllocate::Flags>(
-          flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
-    }
-
-    HInstruction* receiver =
-        AddInstruction(new(zone()) HAllocate(context,
-                                             size_in_bytes,
-                                             HType::JSObject(),
-                                             flags));
-    HAllocate::cast(receiver)->set_known_initial_map(initial_map);
-
-    // Load the initial map from the constructor.
-    HValue* constructor_value =
-        AddInstruction(new(zone()) HConstant(constructor));
-    HValue* initial_map_value =
-        AddLoad(constructor_value, HObjectAccess::ForJSObjectOffset(
-            JSFunction::kPrototypeOrInitialMapOffset));
-
-    // Initialize map and fields of the newly allocated object.
-    { NoObservableSideEffectsScope no_effects(this);
-      ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
-      AddStore(receiver,
-               HObjectAccess::ForJSObjectOffset(JSObject::kMapOffset),
-               initial_map_value);
-      HValue* empty_fixed_array =
-          AddInstruction(new(zone()) HConstant(factory->empty_fixed_array()));
-      AddStore(receiver,
-               HObjectAccess::ForJSObjectOffset(JSObject::kPropertiesOffset),
-               empty_fixed_array);
-      AddStore(receiver,
-               HObjectAccess::ForJSObjectOffset(JSObject::kElementsOffset),
-               empty_fixed_array);
-      if (initial_map->inobject_properties() != 0) {
-        HConstant* undefined = graph()->GetConstantUndefined();
-        for (int i = 0; i < initial_map->inobject_properties(); i++) {
-          int property_offset = JSObject::kHeaderSize + i * kPointerSize;
-          AddStore(receiver,
-                   HObjectAccess::ForJSObjectOffset(property_offset),
-                   undefined);
-        }
-      }
-    }
-
-    // Replace the constructor function with a newly allocated receiver using
-    // the index of the receiver from the top of the expression stack.
+    // Replace the constructor function with a newly allocated receiver.
+    HInstruction* receiver = Add<HAllocateObject>(context, constructor);
+    // Index of the receiver from the top of the expression stack.
     const int receiver_index = argument_count - 1;
     ASSERT(environment()->ExpressionStackAt(receiver_index) == function);
     environment()->SetExpressionStackAt(receiver_index, receiver);
 
     if (TryInlineConstruct(expr, receiver)) return;
 
-    // TODO(mstarzinger): For now we remove the previous HAllocate and all
-    // corresponding instructions and instead add HPushArgument for the
-    // arguments in case inlining failed.  What we actually should do is for
-    // inlining to try to build a subgraph without mutating the parent graph.
-    HInstruction* instr = current_block()->last();
-    while (instr != initial_map_value) {
-      HInstruction* prev_instr = instr->previous();
-      instr->DeleteAndReplaceWith(NULL);
-      instr = prev_instr;
-    }
-    initial_map_value->DeleteAndReplaceWith(NULL);
+    // TODO(mstarzinger): For now we remove the previous HAllocateObject and
+    // add HPushArgument for the arguments in case inlining failed.  What we
+    // actually should do is emit HInvokeFunction on the constructor instead
+    // of using HCallNew as a fallback.
     receiver->DeleteAndReplaceWith(NULL);
     check->DeleteAndReplaceWith(NULL);
     environment()->SetExpressionStackAt(receiver_index, function);
@@ -8955,21 +8600,17 @@
   } else {
     // The constructor function is both an operand to the instruction and an
     // argument to the construct call.
-    Handle<JSFunction> array_function =
-        Handle<JSFunction>(isolate()->global_context()->array_function(),
-                           isolate());
-    bool use_call_new_array = FLAG_optimize_constructed_arrays &&
-        expr->target().is_identical_to(array_function);
-
+    Handle<JSFunction> array_function(
+        isolate()->global_context()->array_function(), isolate());
     CHECK_ALIVE(VisitArgument(expr->expression()));
     HValue* constructor = HPushArgument::cast(Top())->argument();
     CHECK_ALIVE(VisitArgumentList(expr->arguments()));
     HCallNew* call;
-    if (use_call_new_array) {
+    if (expr->target().is_identical_to(array_function)) {
       Handle<Cell> cell = expr->allocation_info_cell();
-      AddInstruction(new(zone()) HCheckFunction(constructor, array_function));
+      Add<HCheckFunction>(constructor, array_function);
       call = new(zone()) HCallNewArray(context, constructor, argument_count,
-                                       cell);
+                                       cell, expr->elements_kind());
     } else {
       call = new(zone()) HCallNew(context, constructor, argument_count);
     }
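
When the construct target is the Array function, the site's allocation-info cell is passed along, now together with the ElementsKind gathered by type feedback, so HCallNewArray can allocate the backing store in the right state up front. Illustrative feedback (assumed values):

  // var a = new Array(1, 2, 3);     // site records FAST_SMI_ELEMENTS
  // var b = new Array(1.5, 2.5);    // site records FAST_DOUBLE_ELEMENTS
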
@@ -9106,11 +8747,10 @@
   HValue* context = environment()->LookupContext();
   HInstruction* instr =
       HMul::New(zone(), context, value, graph()->GetConstantMinus1());
-  Handle<Type> type = expr->type();
-  Representation rep = ToRepresentation(type);
-  if (type->Is(Type::None())) {
+  Handle<Type> operand_type = expr->expression()->lower_type();
+  Representation rep = ToRepresentation(operand_type);
+  if (operand_type->Is(Type::None())) {
     AddSoftDeoptimize();
-    type = handle(Type::Any(), isolate());
   }
   if (instr->IsBinaryOperation()) {
     HBinaryOperation::cast(instr)->set_observed_input_representation(1, rep);
@@ -9123,8 +8763,8 @@
 void HOptimizedGraphBuilder::VisitBitNot(UnaryOperation* expr) {
   CHECK_ALIVE(VisitForValue(expr->expression()));
   HValue* value = Pop();
-  Handle<Type> info = expr->type();
-  if (info->Is(Type::None())) {
+  Handle<Type> operand_type = expr->expression()->lower_type();
+  if (operand_type->Is(Type::None())) {
     AddSoftDeoptimize();
   }
   HInstruction* instr = new(zone()) HBitNot(value);
@@ -9182,8 +8822,8 @@
   // The input to the count operation is on top of the expression stack.
   TypeInfo info = expr->type();
   Representation rep = ToRepresentation(info);
-  if (rep.IsTagged()) {
-    rep = Representation::Integer32();
+  if (rep.IsNone() || rep.IsTagged()) {
+    rep = Representation::Smi();
   }
 
   if (returns_original_input) {
@@ -9191,8 +8831,11 @@
     // actual HChange instruction we need is (sometimes) added in a later
     // phase, so it is not available now to be used as an input to HAdd and
     // as the return value.
-    HInstruction* number_input = new(zone()) HForceRepresentation(Pop(), rep);
-    AddInstruction(number_input);
+    HInstruction* number_input = Add<HForceRepresentation>(Pop(), rep);
+    if (!rep.IsDouble()) {
+      number_input->SetFlag(HInstruction::kFlexibleRepresentation);
+      number_input->SetFlag(HInstruction::kCannotBeTagged);
+    }
     Push(number_input);
   }
 
@@ -9204,10 +8847,7 @@
       : graph()->GetConstantMinus1();
   HValue* context = environment()->LookupContext();
   HInstruction* instr = HAdd::New(zone(), context, Top(), delta);
-  // We can't insert a simulate here, because it would break deoptimization,
-  // so the HAdd must not have side effects, so we must freeze its
-  // representation.
-  instr->AssumeRepresentation(rep);
+  instr->SetFlag(HInstruction::kCannotBeTagged);
   instr->ClearAllSideEffects();
   AddInstruction(instr);
   return instr;
@@ -9278,9 +8918,8 @@
         HValue* context = BuildContextChainWalk(var);
         HStoreContextSlot::Mode mode = IsLexicalVariableMode(var->mode())
             ? HStoreContextSlot::kCheckDeoptimize : HStoreContextSlot::kNoCheck;
-        HStoreContextSlot* instr =
-            new(zone()) HStoreContextSlot(context, var->index(), mode, after);
-        AddInstruction(instr);
+        HStoreContextSlot* instr = Add<HStoreContextSlot>(context, var->index(),
+                                                          mode, after);
         if (instr->HasObservableSideEffects()) {
           AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
         }
@@ -9415,11 +9054,11 @@
       return new(zone()) HConstant(s->Get(i));
     }
   }
-  BuildCheckNonSmi(string);
+  BuildCheckHeapObject(string);
   AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
   HInstruction* length = HStringLength::New(zone(), string);
   AddInstruction(length);
-  HInstruction* checked_index = AddBoundsCheck(index, length);
+  HInstruction* checked_index = Add<HBoundsCheck>(index, length);
   return new(zone()) HStringCharCodeAt(context, string, checked_index);
 }
 
@@ -9486,16 +9125,16 @@
     HValue* left,
     HValue* right) {
   HValue* context = environment()->LookupContext();
-  Handle<Type> left_type = expr->left_type();
-  Handle<Type> right_type = expr->right_type();
-  Handle<Type> result_type = expr->result_type();
-  bool has_fixed_right_arg = expr->has_fixed_right_arg();
-  int fixed_right_arg_value = expr->fixed_right_arg_value();
+  Handle<Type> left_type = expr->left()->lower_type();
+  Handle<Type> right_type = expr->right()->lower_type();
+  Handle<Type> result_type = expr->lower_type();
+  Maybe<int> fixed_right_arg = expr->fixed_right_arg();
   Representation left_rep = ToRepresentation(left_type);
   Representation right_rep = ToRepresentation(right_type);
   Representation result_rep = ToRepresentation(result_type);
   if (left_type->Is(Type::None())) {
     AddSoftDeoptimize();
+    // TODO(rossberg): we should be able to get rid of non-continuous defaults.
     left_type = handle(Type::Any(), isolate());
   }
   if (right_type->Is(Type::None())) {
@@ -9506,9 +9145,9 @@
   switch (expr->op()) {
     case Token::ADD:
       if (left_type->Is(Type::String()) && right_type->Is(Type::String())) {
-        BuildCheckNonSmi(left);
+        BuildCheckHeapObject(left);
         AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
-        BuildCheckNonSmi(right);
+        BuildCheckHeapObject(right);
         AddInstruction(HCheckInstanceType::NewIsString(right, zone()));
         instr = HStringAdd::New(zone(), context, left, right);
       } else {
@@ -9522,12 +9161,7 @@
       instr = HMul::New(zone(), context, left, right);
       break;
     case Token::MOD:
-      instr = HMod::New(zone(),
-                        context,
-                        left,
-                        right,
-                        has_fixed_right_arg,
-                        fixed_right_arg_value);
+      instr = HMod::New(zone(), context, left, right, fixed_right_arg);
       break;
     case Token::DIV:
       instr = HDiv::New(zone(), context, left, right);
@@ -9538,8 +9172,8 @@
       break;
     case Token::BIT_OR: {
       HValue* operand, *shift_amount;
-      if (left_type->Is(Type::Integer32()) &&
-          right_type->Is(Type::Integer32()) &&
+      if (left_type->Is(Type::Signed32()) &&
+          right_type->Is(Type::Signed32()) &&
           MatchRotateRight(left, right, &operand, &shift_amount)) {
         instr = new(zone()) HRor(context, operand, shift_amount);
       } else {
@@ -9581,7 +9215,7 @@
   if (call == NULL) return false;
   Literal* literal = expr->right()->AsLiteral();
   if (literal == NULL) return false;
-  if (!literal->handle()->IsString()) return false;
+  if (!literal->value()->IsString()) return false;
   if (!call->name()->IsOneByteEqualTo(STATIC_ASCII_VECTOR("_ClassOf"))) {
     return false;
   }
@@ -9739,7 +9373,7 @@
 
 Representation HOptimizedGraphBuilder::ToRepresentation(Handle<Type> type) {
   if (type->Is(Type::None())) return Representation::None();
-  if (type->Is(Type::Integer32())) return Representation::Integer32();
+  if (type->Is(Type::Signed32())) return Representation::Integer32();
   if (type->Is(Type::Number())) return Representation::Double();
   return Representation::Tagged();
 }
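
[Annotation] ToRepresentation above picks the narrowest representation the inferred type allows, and the order of the checks matters because the types nest (Signed32 is a subtype of Number, which is a subtype of Any). A standalone restatement with an invented enum, just to show the ordering; this is a sketch, not V8 code:

    #include <cstdio>

    enum Type { kNone, kSigned32, kNumber, kAny };
    enum Representation { rNone, rInteger32, rDouble, rTagged };

    // Narrowest first: Signed32 implies Number, Number implies Any, so
    // the checks must run from most to least specific.
    static Representation ToRepresentation(Type t) {
      if (t == kNone) return rNone;
      if (t == kSigned32) return rInteger32;  // fits a 32-bit integer
      if (t == kNumber) return rDouble;       // any JS number fits a double
      return rTagged;                         // generic tagged pointer
    }

    int main() {
      std::printf("%d\n", ToRepresentation(kSigned32));  // prints 1
      return 0;
    }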
@@ -9827,24 +9461,24 @@
     CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
     HValue* value = Pop();
     Literal* literal = expr->right()->AsLiteral();
-    Handle<String> rhs = Handle<String>::cast(literal->handle());
+    Handle<String> rhs = Handle<String>::cast(literal->value());
     HClassOfTestAndBranch* instr =
         new(zone()) HClassOfTestAndBranch(value, rhs);
     instr->set_position(expr->position());
     return ast_context()->ReturnControl(instr, expr->id());
   }
 
-  Handle<Type> left_type = expr->left_type();
-  Handle<Type> right_type = expr->right_type();
-  Handle<Type> overall_type = expr->overall_type();
-  Representation combined_rep = ToRepresentation(overall_type);
+  Handle<Type> left_type = expr->left()->lower_type();
+  Handle<Type> right_type = expr->right()->lower_type();
+  Handle<Type> combined_type = expr->combined_type();
+  Representation combined_rep = ToRepresentation(combined_type);
   Representation left_rep = ToRepresentation(left_type);
   Representation right_rep = ToRepresentation(right_type);
   // Check if this expression was ever executed according to type feedback.
   // Note that for the special typeof/null/undefined cases we get unknown here.
-  if (overall_type->Is(Type::None())) {
+  if (combined_type->Is(Type::None())) {
     AddSoftDeoptimize();
-    overall_type = left_type = right_type = handle(Type::Any(), isolate());
+    combined_type = left_type = right_type = handle(Type::Any(), isolate());
   }
 
   CHECK_ALIVE(VisitForValue(expr->left()));
@@ -9906,7 +9540,7 @@
       result->set_position(expr->position());
       return ast_context()->ReturnInstruction(result, expr->id());
     } else {
-      AddInstruction(new(zone()) HCheckFunction(right, target));
+      Add<HCheckFunction>(right, target);
       HInstanceOfKnownGlobal* result =
           new(zone()) HInstanceOfKnownGlobal(context, left, target);
       result->set_position(expr->position());
@@ -9916,13 +9550,13 @@
     HIn* result = new(zone()) HIn(context, left, right);
     result->set_position(expr->position());
     return ast_context()->ReturnInstruction(result, expr->id());
-  } else if (overall_type->Is(Type::Receiver())) {
+  } else if (combined_type->Is(Type::Receiver())) {
     switch (op) {
       case Token::EQ:
       case Token::EQ_STRICT: {
         // Can we get away with map check and not instance type check?
-        if (overall_type->IsClass()) {
-          Handle<Map> map = overall_type->AsClass();
+        if (combined_type->IsClass()) {
+          Handle<Map> map = combined_type->AsClass();
           AddCheckMapsWithTransitions(left, map);
           AddCheckMapsWithTransitions(right, map);
           HCompareObjectEqAndBranch* result =
@@ -9930,9 +9564,9 @@
           result->set_position(expr->position());
           return ast_context()->ReturnControl(result, expr->id());
         } else {
-          BuildCheckNonSmi(left);
+          BuildCheckHeapObject(left);
           AddInstruction(HCheckInstanceType::NewIsSpecObject(left, zone()));
-          BuildCheckNonSmi(right);
+          BuildCheckHeapObject(right);
           AddInstruction(HCheckInstanceType::NewIsSpecObject(right, zone()));
           HCompareObjectEqAndBranch* result =
               new(zone()) HCompareObjectEqAndBranch(left, right);
@@ -9943,11 +9577,11 @@
       default:
         return Bailout("Unsupported non-primitive compare");
     }
-  } else if (overall_type->Is(Type::InternalizedString()) &&
+  } else if (combined_type->Is(Type::InternalizedString()) &&
              Token::IsEqualityOp(op)) {
-    BuildCheckNonSmi(left);
+    BuildCheckHeapObject(left);
     AddInstruction(HCheckInstanceType::NewIsInternalizedString(left, zone()));
-    BuildCheckNonSmi(right);
+    BuildCheckHeapObject(right);
     AddInstruction(HCheckInstanceType::NewIsInternalizedString(right, zone()));
     HCompareObjectEqAndBranch* result =
         new(zone()) HCompareObjectEqAndBranch(left, right);
@@ -9964,8 +9598,8 @@
     } else {
       // TODO(verwaest): Remove once ToRepresentation properly returns Smi when
       // the IC measures Smi.
-      if (left_type->Is(Type::Integer31())) left_rep = Representation::Smi();
-      if (right_type->Is(Type::Integer31())) right_rep = Representation::Smi();
+      if (left_type->Is(Type::Smi())) left_rep = Representation::Smi();
+      if (right_type->Is(Type::Smi())) right_rep = Representation::Smi();
       HCompareIDAndBranch* result =
           new(zone()) HCompareIDAndBranch(left, right, op);
       result->set_observed_input_representation(left_rep, right_rep);
@@ -9994,8 +9628,8 @@
     if_nil.CaptureContinuation(&continuation);
     return ast_context()->ReturnContinuation(&continuation, expr->id());
   }
-  Handle<Type> type = expr->compare_nil_type()->Is(Type::None())
-      ? handle(Type::Any(), isolate_) : expr->compare_nil_type();
+  Handle<Type> type = expr->combined_type()->Is(Type::None())
+      ? handle(Type::Any(), isolate_) : expr->combined_type();
   BuildCompareNil(value, type, expr->position(), &continuation);
   return ast_context()->ReturnContinuation(&continuation, expr->id());
 }
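
[Annotation] The binary-op, compare, and compare-nil sites above all apply one rule to missing type feedback: if the recorded type is None, the site never executed, so insert a soft deoptimization and continue as if the type were Any. A minimal sketch of that rule, with invented names and enum values:

    #include <cstdio>

    enum Type { kNone, kAny };

    // None means "this site never ran": request a soft deopt and widen
    // the assumption to Any so graph building can continue safely.
    static Type DefaultUnexecutedType(Type feedback, bool* add_soft_deopt) {
      if (feedback == kNone) {
        *add_soft_deopt = true;
        return kAny;
      }
      return feedback;
    }

    int main() {
      bool deopt = false;
      Type t = DefaultUnexecutedType(kNone, &deopt);
      std::printf("type=%d soft_deopt=%d\n", t, deopt);
      return 0;
    }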
@@ -10020,29 +9654,43 @@
     int data_size,
     int pointer_size,
     AllocationSiteMode mode) {
-  Zone* zone = this->zone();
-  int total_size = data_size + pointer_size;
-
   NoObservableSideEffectsScope no_effects(this);
 
-  HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE;
-  // TODO(hpayer): add support for old data space
-  if (isolate()->heap()->ShouldGloballyPretenure() &&
-      data_size == 0) {
-    flags = static_cast<HAllocate::Flags>(
-        flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
+  HInstruction* target = NULL;
+  HInstruction* data_target = NULL;
+
+  HAllocate::Flags flags = HAllocate::DefaultFlags();
+
+  if (isolate()->heap()->ShouldGloballyPretenure()) {
+    if (data_size != 0) {
+      HAllocate::Flags data_flags =
+          static_cast<HAllocate::Flags>(HAllocate::DefaultFlags() |
+              HAllocate::CAN_ALLOCATE_IN_OLD_DATA_SPACE);
+      HValue* size_in_bytes = Add<HConstant>(data_size);
+      data_target = Add<HAllocate>(context, size_in_bytes,
+                                   HType::JSObject(), data_flags);
+      Handle<Map> free_space_map = isolate()->factory()->free_space_map();
+      AddStoreMapConstant(data_target, free_space_map);
+      HObjectAccess access =
+          HObjectAccess::ForJSObjectOffset(FreeSpace::kSizeOffset);
+      AddStore(data_target, access, size_in_bytes);
+    }
+    if (pointer_size != 0) {
+      flags = static_cast<HAllocate::Flags>(
+          flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
+      HValue* size_in_bytes = Add<HConstant>(pointer_size);
+      target = Add<HAllocate>(context, size_in_bytes, HType::JSObject(), flags);
+    }
+  } else {
+    HValue* size_in_bytes = Add<HConstant>(data_size + pointer_size);
+    target = Add<HAllocate>(context, size_in_bytes, HType::JSObject(), flags);
   }
 
-  HValue* size_in_bytes = AddInstruction(new(zone) HConstant(total_size));
-  HInstruction* result =
-      AddInstruction(new(zone) HAllocate(context,
-                                         size_in_bytes,
-                                         HType::JSObject(),
-                                         flags));
   int offset = 0;
-  BuildEmitDeepCopy(boilerplate_object, original_boilerplate_object, result,
-                    &offset, mode);
-  return result;
+  int data_offset = 0;
+  BuildEmitDeepCopy(boilerplate_object, original_boilerplate_object, target,
+                    &offset, data_target, &data_offset, mode);
+  return target;
 }
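
[Annotation] The rewritten builder keeps two independent layout cursors: *offset for the pointer-space allocation (target) and *data_offset for the optional old-data-space allocation (data_target) that receives double payloads when pretenuring. A standalone analogue of that bookkeeping, with invented names and sizes; not V8 code:

    #include <cstdio>

    // Reserve size bytes at the current offset and advance the cursor.
    static int Reserve(int* offset, int size) {
      int at = *offset;
      *offset += size;
      return at;
    }

    int main() {
      int offset = 0;       // pointer-space layout cursor
      int data_offset = 0;  // data-space layout cursor
      int object_at = Reserve(&offset, 32);         // the object itself
      int elements_at = Reserve(&data_offset, 64);  // its double elements
      std::printf("object@%d elements@%d\n", object_at, elements_at);
      return 0;
    }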
 
 
@@ -10051,46 +9699,56 @@
     Handle<JSObject> original_boilerplate_object,
     HInstruction* target,
     int* offset,
+    HInstruction* data_target,
+    int* data_offset,
     AllocationSiteMode mode) {
-  Zone* zone = this->zone();
-
   Handle<FixedArrayBase> elements(boilerplate_object->elements());
   Handle<FixedArrayBase> original_elements(
       original_boilerplate_object->elements());
   ElementsKind kind = boilerplate_object->map()->elements_kind();
 
-  // Increase the offset so that subsequent objects end up right after
-  // this object and its backing store.
   int object_offset = *offset;
   int object_size = boilerplate_object->map()->instance_size();
   int elements_size = (elements->length() > 0 &&
       elements->map() != isolate()->heap()->fixed_cow_array_map()) ?
           elements->Size() : 0;
-  int elements_offset = *offset + object_size;
+  int elements_offset = 0;
 
-  *offset += object_size + elements_size;
+  if (data_target != NULL && boilerplate_object->HasFastDoubleElements()) {
+    elements_offset = *data_offset;
+    *data_offset += elements_size;
+  } else {
+    // Place elements right after this object.
+    elements_offset = *offset + object_size;
+    *offset += elements_size;
+  }
+  // Increase the offset so that subsequent objects end up right after this
+  // object (and its elements if they are allocated in the same space).
+  *offset += object_size;
 
   // Copy object elements if non-COW.
   HValue* object_elements = BuildEmitObjectHeader(boilerplate_object, target,
-      object_offset, elements_offset, elements_size);
+      data_target, object_offset, elements_offset, elements_size);
   if (object_elements != NULL) {
     BuildEmitElements(elements, original_elements, kind, object_elements,
-        target, offset);
+        target, offset, data_target, data_offset);
   }
 
   // Copy in-object properties.
-  HValue* object_properties =
-      AddInstruction(new(zone) HInnerAllocatedObject(target, object_offset));
-  BuildEmitInObjectProperties(boilerplate_object, original_boilerplate_object,
-      object_properties, target, offset);
+  if (boilerplate_object->map()->NumberOfFields() != 0) {
+    HValue* object_properties =
+        Add<HInnerAllocatedObject>(target, object_offset);
+    BuildEmitInObjectProperties(boilerplate_object, original_boilerplate_object,
+        object_properties, target, offset, data_target, data_offset);
+  }
 
   // Create allocation site info.
   if (mode == TRACK_ALLOCATION_SITE &&
       boilerplate_object->map()->CanTrackAllocationSite()) {
     elements_offset += AllocationSiteInfo::kSize;
     *offset += AllocationSiteInfo::kSize;
-    HInstruction* original_boilerplate = AddInstruction(new(zone) HConstant(
-        original_boilerplate_object));
+    HInstruction* original_boilerplate =
+        Add<HConstant>(original_boilerplate_object);
     BuildCreateAllocationSiteInfo(target, JSArray::kSize, original_boilerplate);
   }
 }
@@ -10099,15 +9757,14 @@
 HValue* HOptimizedGraphBuilder::BuildEmitObjectHeader(
     Handle<JSObject> boilerplate_object,
     HInstruction* target,
+    HInstruction* data_target,
     int object_offset,
     int elements_offset,
     int elements_size) {
   ASSERT(boilerplate_object->properties()->length() == 0);
-  Zone* zone = this->zone();
   HValue* result = NULL;
 
-  HValue* object_header =
-      AddInstruction(new(zone) HInnerAllocatedObject(target, object_offset));
+  HValue* object_header = Add<HInnerAllocatedObject>(target, object_offset);
   Handle<Map> boilerplate_object_map(boilerplate_object->map());
   AddStoreMapConstant(object_header, boilerplate_object_map);
 
@@ -10115,10 +9772,13 @@
   if (elements_size == 0) {
     Handle<Object> elements_field =
         Handle<Object>(boilerplate_object->elements(), isolate());
-    elements = AddInstruction(new(zone) HConstant(elements_field));
+    elements = Add<HConstant>(elements_field);
   } else {
-    elements = AddInstruction(new(zone) HInnerAllocatedObject(
-        target, elements_offset));
+    if (data_target != NULL && boilerplate_object->HasFastDoubleElements()) {
+      elements = Add<HInnerAllocatedObject>(data_target, elements_offset);
+    } else {
+      elements = Add<HInnerAllocatedObject>(target, elements_offset);
+    }
     result = elements;
   }
   AddStore(object_header, HObjectAccess::ForElementsPointer(), elements);
@@ -10126,8 +9786,7 @@
   Handle<Object> properties_field =
       Handle<Object>(boilerplate_object->properties(), isolate());
   ASSERT(*properties_field == isolate()->heap()->empty_fixed_array());
-  HInstruction* properties = AddInstruction(new(zone) HConstant(
-      properties_field));
+  HInstruction* properties = Add<HConstant>(properties_field);
   HObjectAccess access = HObjectAccess::ForPropertiesPointer();
   AddStore(object_header, access, properties);
 
@@ -10136,7 +9795,7 @@
         Handle<JSArray>::cast(boilerplate_object);
     Handle<Object> length_field =
         Handle<Object>(boilerplate_array->length(), isolate());
-    HInstruction* length = AddInstruction(new(zone) HConstant(length_field));
+    HInstruction* length = Add<HConstant>(length_field);
 
     ASSERT(boilerplate_array->length()->IsSmi());
     Representation representation =
@@ -10155,8 +9814,9 @@
     Handle<JSObject> original_boilerplate_object,
     HValue* object_properties,
     HInstruction* target,
-    int* offset) {
-  Zone* zone = this->zone();
+    int* offset,
+    HInstruction* data_target,
+    int* data_offset) {
   Handle<DescriptorArray> descriptors(
       boilerplate_object->map()->instance_descriptors());
   int limit = boilerplate_object->map()->NumberOfOwnDescriptors();
@@ -10183,28 +9843,32 @@
       Handle<JSObject> original_value_object = Handle<JSObject>::cast(
           Handle<Object>(original_boilerplate_object->InObjectPropertyAt(index),
               isolate()));
-      HInstruction* value_instruction =
-          AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
+      HInstruction* value_instruction = Add<HInnerAllocatedObject>(target,
+                                                                   *offset);
 
       AddStore(object_properties, access, value_instruction);
 
       BuildEmitDeepCopy(value_object, original_value_object, target,
-          offset, DONT_TRACK_ALLOCATION_SITE);
+          offset, data_target, data_offset, DONT_TRACK_ALLOCATION_SITE);
     } else {
       Representation representation = details.representation();
-      HInstruction* value_instruction =
-          AddInstruction(new(zone) HConstant(value));
+      HInstruction* value_instruction = Add<HConstant>(value);
 
       if (representation.IsDouble()) {
         // Allocate a HeapNumber box and store the value into it.
-        HInstruction* double_box =
-            AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
+        HInstruction* double_box;
+        if (data_target != NULL) {
+          double_box = Add<HInnerAllocatedObject>(data_target, *data_offset);
+          *data_offset += HeapNumber::kSize;
+        } else {
+          double_box = Add<HInnerAllocatedObject>(target, *offset);
+          *offset += HeapNumber::kSize;
+        }
         AddStoreMapConstant(double_box,
             isolate()->factory()->heap_number_map());
         AddStore(double_box, HObjectAccess::ForHeapNumberValue(),
             value_instruction, Representation::Double());
         value_instruction = double_box;
-        *offset += HeapNumber::kSize;
       }
 
       AddStore(object_properties, access, value_instruction);
@@ -10212,8 +9876,8 @@
   }
 
   int inobject_properties = boilerplate_object->map()->inobject_properties();
-  HInstruction* value_instruction = AddInstruction(new(zone)
-      HConstant(isolate()->factory()->one_pointer_filler_map()));
+  HInstruction* value_instruction =
+      Add<HConstant>(isolate()->factory()->one_pointer_filler_map());
   for (int i = copied_fields; i < inobject_properties; i++) {
     ASSERT(boilerplate_object->IsJSObject());
     int property_offset = boilerplate_object->GetInObjectPropertyOffset(i);
@@ -10229,12 +9893,11 @@
     ElementsKind kind,
     HValue* object_elements,
     HInstruction* target,
-    int* offset) {
-  Zone* zone = this->zone();
-
+    int* offset,
+    HInstruction* data_target,
+    int* data_offset) {
   int elements_length = elements->length();
-  HValue* object_elements_length =
-      AddInstruction(new(zone) HConstant(elements_length));
+  HValue* object_elements_length = Add<HConstant>(elements_length);
 
   BuildInitializeElementsHeader(object_elements, kind, object_elements_length);
 
@@ -10243,7 +9906,7 @@
     BuildEmitFixedDoubleArray(elements, kind, object_elements);
   } else if (elements->IsFixedArray()) {
     BuildEmitFixedArray(elements, original_elements, kind, object_elements,
-        target, offset);
+        target, offset, data_target, data_offset);
   } else {
     UNREACHABLE();
   }
@@ -10254,17 +9917,16 @@
     Handle<FixedArrayBase> elements,
     ElementsKind kind,
     HValue* object_elements) {
-  Zone* zone = this->zone();
-  HInstruction* boilerplate_elements =
-      AddInstruction(new(zone) HConstant(elements));
+  HInstruction* boilerplate_elements = Add<HConstant>(elements);
   int elements_length = elements->length();
   for (int i = 0; i < elements_length; i++) {
-    HValue* key_constant = AddInstruction(new(zone) HConstant(i));
+    HValue* key_constant = Add<HConstant>(i);
     HInstruction* value_instruction =
-        AddInstruction(new(zone) HLoadKeyed(
-            boilerplate_elements, key_constant, NULL, kind, ALLOW_RETURN_HOLE));
-    HInstruction* store = AddInstruction(new(zone) HStoreKeyed(
-        object_elements, key_constant, value_instruction, kind));
+        Add<HLoadKeyed>(boilerplate_elements, key_constant,
+                        static_cast<HValue*>(NULL), kind,
+                        ALLOW_RETURN_HOLE);
+    HInstruction* store = Add<HStoreKeyed>(object_elements, key_constant,
+                                           value_instruction, kind);
     store->SetFlag(HValue::kAllowUndefinedAsNaN);
   }
 }
@@ -10276,34 +9938,32 @@
     ElementsKind kind,
     HValue* object_elements,
     HInstruction* target,
-    int* offset) {
-  Zone* zone = this->zone();
-  HInstruction* boilerplate_elements =
-      AddInstruction(new(zone) HConstant(elements));
+    int* offset,
+    HInstruction* data_target,
+    int* data_offset) {
+  HInstruction* boilerplate_elements = Add<HConstant>(elements);
   int elements_length = elements->length();
   Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
   Handle<FixedArray> original_fast_elements =
       Handle<FixedArray>::cast(original_elements);
   for (int i = 0; i < elements_length; i++) {
     Handle<Object> value(fast_elements->get(i), isolate());
-    HValue* key_constant = AddInstruction(new(zone) HConstant(i));
+    HValue* key_constant = Add<HConstant>(i);
     if (value->IsJSObject()) {
       Handle<JSObject> value_object = Handle<JSObject>::cast(value);
       Handle<JSObject> original_value_object = Handle<JSObject>::cast(
           Handle<Object>(original_fast_elements->get(i), isolate()));
-      HInstruction* value_instruction =
-          AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
-      AddInstruction(new(zone) HStoreKeyed(
-          object_elements, key_constant, value_instruction, kind));
+      HInstruction* value_instruction = Add<HInnerAllocatedObject>(target,
+                                                                   *offset);
+      Add<HStoreKeyed>(object_elements, key_constant, value_instruction, kind);
       BuildEmitDeepCopy(value_object, original_value_object, target,
-          offset, DONT_TRACK_ALLOCATION_SITE);
+          offset, data_target, data_offset, DONT_TRACK_ALLOCATION_SITE);
     } else {
       HInstruction* value_instruction =
-          AddInstruction(new(zone) HLoadKeyed(
-              boilerplate_elements, key_constant, NULL, kind,
-              ALLOW_RETURN_HOLE));
-      AddInstruction(new(zone) HStoreKeyed(
-          object_elements, key_constant, value_instruction, kind));
+          Add<HLoadKeyed>(boilerplate_elements, key_constant,
+                          static_cast<HValue*>(NULL), kind,
+                          ALLOW_RETURN_HOLE);
+      Add<HStoreKeyed>(object_elements, key_constant, value_instruction, kind);
     }
   }
 }
@@ -10328,9 +9988,7 @@
     int flags = DeclareGlobalsEvalFlag::encode(current_info()->is_eval()) |
         DeclareGlobalsNativeFlag::encode(current_info()->is_native()) |
         DeclareGlobalsLanguageMode::encode(current_info()->language_mode());
-    HInstruction* result = new(zone()) HDeclareGlobals(
-        environment()->LookupContext(), array, flags);
-    AddInstruction(result);
+    Add<HDeclareGlobals>(environment()->LookupContext(), array, flags);
     globals_.Clear();
   }
 }
@@ -10360,9 +10018,8 @@
       if (hole_init) {
         HValue* value = graph()->GetConstantHole();
         HValue* context = environment()->LookupContext();
-        HStoreContextSlot* store = new(zone()) HStoreContextSlot(
+        HStoreContextSlot* store = Add<HStoreContextSlot>(
             context, variable->index(), HStoreContextSlot::kNoCheck, value);
-        AddInstruction(store);
         if (store->HasObservableSideEffects()) {
           AddSimulate(proxy->id(), REMOVABLE_SIMULATE);
         }
@@ -10399,9 +10056,8 @@
       CHECK_ALIVE(VisitForValue(declaration->fun()));
       HValue* value = Pop();
       HValue* context = environment()->LookupContext();
-      HStoreContextSlot* store = new(zone()) HStoreContextSlot(
+      HStoreContextSlot* store = Add<HStoreContextSlot>(
           context, variable->index(), HStoreContextSlot::kNoCheck, value);
-      AddInstruction(store);
       if (store->HasObservableSideEffects()) {
         AddSimulate(proxy->id(), REMOVABLE_SIMULATE);
       }
@@ -10573,8 +10229,7 @@
   // function is blacklisted by AstNode::IsInlineable.
   ASSERT(function_state()->outer() == NULL);
   ASSERT(call->arguments()->length() == 0);
-  HInstruction* elements = AddInstruction(
-      new(zone()) HArgumentsElements(false));
+  HInstruction* elements = Add<HArgumentsElements>(false);
   HArgumentsLength* result = new(zone()) HArgumentsLength(elements);
   return ast_context()->ReturnInstruction(result, call->id());
 }
@@ -10588,10 +10243,9 @@
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* index = Pop();
-  HInstruction* elements = AddInstruction(
-      new(zone()) HArgumentsElements(false));
-  HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
-  HInstruction* checked_index = AddBoundsCheck(index, length);
+  HInstruction* elements = Add<HArgumentsElements>(false);
+  HInstruction* length = Add<HArgumentsLength>(elements);
+  HInstruction* checked_index = Add<HBoundsCheck>(index, length);
   HAccessArgumentsAt* result =
       new(zone()) HAccessArgumentsAt(elements, length, checked_index);
   return ast_context()->ReturnInstruction(result, call->id());
@@ -10618,7 +10272,7 @@
 void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
   ASSERT_NE(NULL, call->arguments()->at(1)->AsLiteral());
-  Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->handle()));
+  Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->value()));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* date = Pop();
   HDateField* result = new(zone()) HDateField(date, index);
@@ -10755,8 +10409,7 @@
 // Fast support for Math.random().
 void HOptimizedGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
   HValue* context = environment()->LookupContext();
-  HGlobalObject* global_object = new(zone()) HGlobalObject(context);
-  AddInstruction(global_object);
+  HGlobalObject* global_object = Add<HGlobalObject>(context);
   HRandom* result = new(zone()) HRandom(global_object);
   return ast_context()->ReturnInstruction(result, call->id());
 }
@@ -10862,15 +10515,14 @@
   current_block()->Finish(typecheck);
 
   set_current_block(if_jsfunction);
-  HInstruction* invoke_result = AddInstruction(
-      new(zone()) HInvokeFunction(context, function, arg_count));
+  HInstruction* invoke_result =
+      Add<HInvokeFunction>(context, function, arg_count);
   Drop(arg_count);
   Push(invoke_result);
   if_jsfunction->Goto(join);
 
   set_current_block(if_nonfunction);
-  HInstruction* call_result = AddInstruction(
-      new(zone()) HCallFunction(context, function, arg_count));
+  HInstruction* call_result = Add<HCallFunction>(context, function, arg_count);
   Drop(arg_count);
   Push(call_result);
   if_nonfunction->Goto(join);
@@ -11528,10 +11180,10 @@
   }
 
   for (int i = 0; i < names_.length(); ++i) {
-    PrintF("%30s", names_[i]);
+    PrintF("%32s", names_[i]);
     double ms = static_cast<double>(timing_[i]) / 1000;
     double percent = static_cast<double>(timing_[i]) * 100 / sum;
-    PrintF(" - %8.3f ms / %4.1f %% ", ms, percent);
+    PrintF(" %8.3f ms / %4.1f %% ", ms, percent);
 
     unsigned size = sizes_[i];
     double size_percent = static_cast<double>(size) * 100 / total_size_;
@@ -11541,21 +11193,21 @@
   PrintF("----------------------------------------"
          "---------------------------------------\n");
   int64_t total = create_graph_ + optimize_graph_ + generate_code_;
-  PrintF("%30s - %8.3f ms / %4.1f %% \n",
+  PrintF("%32s %8.3f ms / %4.1f %% \n",
          "Create graph",
          static_cast<double>(create_graph_) / 1000,
          static_cast<double>(create_graph_) * 100 / total);
-  PrintF("%30s - %8.3f ms / %4.1f %% \n",
+  PrintF("%32s %8.3f ms / %4.1f %% \n",
          "Optimize graph",
          static_cast<double>(optimize_graph_) / 1000,
          static_cast<double>(optimize_graph_) * 100 / total);
-  PrintF("%30s - %8.3f ms / %4.1f %% \n",
+  PrintF("%32s %8.3f ms / %4.1f %% \n",
          "Generate and install code",
          static_cast<double>(generate_code_) / 1000,
          static_cast<double>(generate_code_) * 100 / total);
   PrintF("----------------------------------------"
          "---------------------------------------\n");
-  PrintF("%30s - %8.3f ms (%.1f times slower than full code gen)\n",
+  PrintF("%32s %8.3f ms (%.1f times slower than full code gen)\n",
          "Total",
          static_cast<double>(total) / 1000,
          static_cast<double>(total) / full_code_gen_);
@@ -11567,99 +11219,34 @@
   double normalized_size_in_kb = source_size_in_kb > 0
       ? total_size_ / 1024 / source_size_in_kb
       : 0;
-  PrintF("%30s - %8.3f ms           %7.3f kB allocated\n",
+  PrintF("%32s %8.3f ms           %7.3f kB allocated\n",
          "Average per kB source",
          normalized_time, normalized_size_in_kb);
 }
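
[Annotation] For reference, a minimal standalone illustration of the widened layout used above (the name column grows from 30 to 32 characters and the " - " separator is dropped); the timing values here are made up:

    #include <cstdio>

    int main() {
      // Same format string as the patch: right-aligned 32-column name,
      // fixed-width milliseconds and percentage.
      std::printf("%32s %8.3f ms / %4.1f %% \n", "Create graph", 12.345, 61.0);
      std::printf("%32s %8.3f ms / %4.1f %% \n", "Optimize graph", 6.789, 33.6);
      return 0;
    }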
 
 
 void HStatistics::SaveTiming(const char* name, int64_t ticks, unsigned size) {
-  if (name == HPhase::kFullCodeGen) {
-    full_code_gen_ += ticks;
-  } else {
-    total_size_ += size;
-    for (int i = 0; i < names_.length(); ++i) {
-      if (strcmp(names_[i], name) == 0) {
-        timing_[i] += ticks;
-        sizes_[i] += size;
-        return;
-      }
+  total_size_ += size;
+  for (int i = 0; i < names_.length(); ++i) {
+    if (strcmp(names_[i], name) == 0) {
+      timing_[i] += ticks;
+      sizes_[i] += size;
+      return;
     }
-    names_.Add(name);
-    timing_.Add(ticks);
-    sizes_.Add(size);
   }
-}
-
-
-const char* const HPhase::kFullCodeGen = "Full code generator";
-
-
-HPhase::HPhase(const char* name, Isolate* isolate) {
-  Init(isolate, name, NULL, NULL, NULL);
-}
-
-
-HPhase::HPhase(const char* name, HGraph* graph) {
-  Init(graph->isolate(), name, graph, NULL, NULL);
-}
-
-
-HPhase::HPhase(const char* name, LChunk* chunk) {
-  Init(chunk->isolate(), name, NULL, chunk, NULL);
-}
-
-
-HPhase::HPhase(const char* name, LAllocator* allocator) {
-  Init(allocator->isolate(), name, NULL, NULL, allocator);
-}
-
-
-void HPhase::Init(Isolate* isolate,
-                  const char* name,
-                  HGraph* graph,
-                  LChunk* chunk,
-                  LAllocator* allocator) {
-  isolate_ = isolate;
-  name_ = name;
-  graph_ = graph;
-  chunk_ = chunk;
-  allocator_ = allocator;
-  if (allocator != NULL && chunk_ == NULL) {
-    chunk_ = allocator->chunk();
-  }
-  if (FLAG_hydrogen_stats) {
-    start_ticks_ = OS::Ticks();
-    start_allocation_size_ = Zone::allocation_size_;
-  }
+  names_.Add(name);
+  timing_.Add(ticks);
+  sizes_.Add(size);
 }
 
 
 HPhase::~HPhase() {
-  if (FLAG_hydrogen_stats) {
-    int64_t ticks = OS::Ticks() - start_ticks_;
-    unsigned size = Zone::allocation_size_ - start_allocation_size_;
-    isolate_->GetHStatistics()->SaveTiming(name_, ticks, size);
-  }
-
-  // Produce trace output if flag is set so that the first letter of the
-  // phase name matches the command line parameter FLAG_trace_phase.
-  if (FLAG_trace_hydrogen &&
-      OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL) {
-    if (graph_ != NULL) {
-      isolate_->GetHTracer()->TraceHydrogen(name_, graph_);
-    }
-    if (chunk_ != NULL) {
-      isolate_->GetHTracer()->TraceLithium(name_, chunk_);
-    }
-    if (allocator_ != NULL) {
-      isolate_->GetHTracer()->TraceLiveRanges(name_, allocator_);
-    }
+  if (ShouldProduceTraceOutput()) {
+    isolate()->GetHTracer()->TraceHydrogen(name(), graph_);
   }
 
 #ifdef DEBUG
-  if (graph_ != NULL) graph_->Verify(false);  // No full verify.
-  if (allocator_ != NULL) allocator_->Verify();
+  graph_->Verify(false);  // No full verify.
 #endif
 }
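
[Annotation] With the timing and allocation bookkeeping moved into the CompilationPhase base class, HPhase keeps only tracing and debug verification in its destructor. A generic, standalone sketch of the underlying RAII idea (ScopedPhase is an invented name, not V8 code): the constructor records the start state and the destructor reports when the scope ends.

    #include <chrono>
    #include <cstdio>

    class ScopedPhase {
     public:
      explicit ScopedPhase(const char* name)
          : name_(name), start_(std::chrono::steady_clock::now()) {}
      ~ScopedPhase() {
        // Report elapsed time when the phase scope closes.
        long long us = std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::steady_clock::now() - start_).count();
        std::printf("%s: %lld us\n", name_, us);
      }

     private:
      const char* name_;
      std::chrono::steady_clock::time_point start_;
    };

    int main() {
      ScopedPhase phase("H_Example phase");
      // ... phase body would run here ...
      return 0;
    }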
 
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 75cbc25..7442b5f 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -66,6 +66,7 @@
   HInstruction* first() const { return first_; }
   HInstruction* last() const { return last_; }
   void set_last(HInstruction* instr) { last_ = instr; }
+  HInstruction* GetLastInstruction();
   HControlInstruction* end() const { return end_; }
   HLoopInformation* loop_information() const { return loop_information_; }
   const ZoneList<HBasicBlock*>* predecessors() const { return &predecessors_; }
@@ -283,7 +284,6 @@
   void MarkDeoptimizeOnUndefined();
   void ComputeMinusZeroChecks();
   void ComputeSafeUint32Operations();
-  void GlobalValueNumbering();
   bool ProcessArgumentsObject();
   void EliminateRedundantPhis();
   void Canonicalize();
@@ -427,6 +427,12 @@
   HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
                          int32_t integer_value);
 
+  template<class Phase>
+  void Run() {
+    Phase phase(this);
+    phase.Run();
+  }
+
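
[Annotation] The Run<Phase>() helper above assumes a small convention: each pass is a class that takes the graph in its constructor and exposes a Run() method (which is how the removed HGraph::GlobalValueNumbering-style member functions get replaced). A minimal self-contained sketch of that convention, with all names invented:

    struct Graph;

    // Each pass is a class: construct with the graph, then Run().
    class ExamplePhase {
     public:
      explicit ExamplePhase(Graph* graph) : graph_(graph) {}
      void Run() { /* transform *graph_ here */ }

     private:
      Graph* graph_;
    };

    struct Graph {
      template <class Phase>
      void Run() {
        Phase phase(this);
        phase.Run();
      }
    };

    int main() {
      Graph g;
      g.Run<ExamplePhase>();
      return 0;
    }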
   void MarkLive(HValue* ref, HValue* instr, ZoneList<HValue*>* worklist);
   void MarkLiveInstructions();
   void RemoveDeadInstructions();
@@ -700,25 +706,6 @@
 };
 
 
-class HInferRepresentation BASE_EMBEDDED {
- public:
-  explicit HInferRepresentation(HGraph* graph)
-      : graph_(graph),
-        worklist_(8, graph->zone()),
-        in_worklist_(graph->GetMaximumValueID(), graph->zone()) { }
-
-  void Analyze();
-  void AddToWorklist(HValue* current);
-
- private:
-  Zone* zone() const { return graph_->zone(); }
-
-  HGraph* graph_;
-  ZoneList<HValue*> worklist_;
-  BitVector in_worklist_;
-};
-
-
 class HOptimizedGraphBuilder;
 
 enum ArgumentsAllowedFlag {
@@ -991,9 +978,57 @@
 
   // Adding instructions.
   HInstruction* AddInstruction(HInstruction* instr);
+
+  template<class I>
+  I* Add() { return static_cast<I*>(AddInstruction(new(zone()) I())); }
+
+  template<class I, class P1>
+  I* Add(P1 p1) {
+    return static_cast<I*>(AddInstruction(new(zone()) I(p1)));
+  }
+
+  template<class I, class P1, class P2>
+  I* Add(P1 p1, P2 p2) {
+      return static_cast<I*>(AddInstruction(new(zone()) I(p1, p2)));
+  }
+
+  template<class I, class P1, class P2, class P3>
+  I* Add(P1 p1, P2 p2, P3 p3) {
+    return static_cast<I*>(AddInstruction(new(zone()) I(p1, p2, p3)));
+  }
+
+  template<class I, class P1, class P2, class P3, class P4>
+  I* Add(P1 p1, P2 p2, P3 p3, P4 p4) {
+    return static_cast<I*>(AddInstruction(new(zone()) I(p1, p2, p3, p4)));
+  }
+
+  template<class I, class P1, class P2, class P3, class P4, class P5>
+  I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
+    return static_cast<I*>(AddInstruction(new(zone()) I(p1, p2, p3, p4, p5)));
+  }
+
+  template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
+  I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {
+    return static_cast<I*>(AddInstruction(
+            new(zone()) I(p1, p2, p3, p4, p5, p6)));
+  }
+
+  template<class I, class P1, class P2, class P3,
+           class P4, class P5, class P6, class P7>
+  I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {
+    return static_cast<I*>(AddInstruction(
+            new(zone()) I(p1, p2, p3, p4, p5, p6, p7)));
+  }
+
+  template<class I, class P1, class P2, class P3, class P4,
+           class P5, class P6, class P7, class P8>
+  I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8) {
+    return static_cast<I*>(AddInstruction(
+            new(zone()) I(p1, p2, p3, p4, p5, p6, p7, p8)));
+  }
+
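
[Annotation] The overload ladder above gives the builder a single Add<I>(...) entry point that constructs an instruction in the zone and appends it in one step, replacing the AddInstruction(new(zone()) ...) spelling seen throughout this patch. A standalone sketch of the same pattern with invented Builder/Instruction/Constant types, written with C++11 variadics instead of the per-arity overloads (presumably spelled out because the codebase predates C++11):

    #include <cstdio>
    #include <memory>
    #include <utility>
    #include <vector>

    struct Instruction {
      virtual ~Instruction() {}
    };

    class Builder {
     public:
      // Construct an instruction and register it in one step, mirroring
      // the patch's Add<HConstant>(42) replacing
      // AddInstruction(new(zone()) HConstant(42)).
      template <class I, class... Args>
      I* Add(Args&&... args) {
        instructions_.emplace_back(new I(std::forward<Args>(args)...));
        return static_cast<I*>(instructions_.back().get());
      }

     private:
      std::vector<std::unique_ptr<Instruction> > instructions_;
    };

    struct Constant : Instruction {
      explicit Constant(int v) : value(v) {}
      int value;
    };

    int main() {
      Builder b;
      Constant* c = b.Add<Constant>(42);
      std::printf("%d\n", c->value);
      return 0;
    }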
   void AddSimulate(BailoutId id,
                    RemovableSimulate removable = FIXED_SIMULATE);
-  HBoundsCheck* AddBoundsCheck(HValue* index, HValue* length);
 
   HReturn* AddReturn(HValue* value);
 
@@ -1011,7 +1046,7 @@
   HBasicBlock* CreateBasicBlock(HEnvironment* env);
   HBasicBlock* CreateLoopHeaderBlock();
 
-  HValue* BuildCheckNonSmi(HValue* object);
+  HValue* BuildCheckHeapObject(HValue* object);
   HValue* BuildCheckMap(HValue* obj, Handle<Map> map);
 
   // Building common constructs
@@ -1270,7 +1305,8 @@
     JSArrayBuilder(HGraphBuilder* builder,
                    ElementsKind kind,
                    HValue* allocation_site_payload,
-                   bool disable_allocation_sites);
+                   HValue* constructor_function,
+                   AllocationSiteOverrideMode override_mode);
 
     JSArrayBuilder(HGraphBuilder* builder,
                    ElementsKind kind,
@@ -1286,9 +1322,6 @@
     int elements_size() const {
       return IsFastDoubleElementsKind(kind_) ? kDoubleSize : kPointerSize;
     }
-    HInstruction* AddInstruction(HInstruction* instr) {
-      return builder_->AddInstruction(instr);
-    }
     HGraphBuilder* builder() { return builder_; }
     HGraph* graph() { return builder_->graph(); }
     int initial_capacity() {
@@ -1623,8 +1656,6 @@
   // Visit a list of expressions from left to right, each in a value context.
   void VisitExpressions(ZoneList<Expression*>* exprs);
 
-  void AddPhi(HPhi* phi);
-
   void PushAndAdd(HInstruction* instr);
 
   // Remove the arguments from the bailout environment and emit instructions
@@ -1829,13 +1860,16 @@
 
   void BuildEmitDeepCopy(Handle<JSObject> boilerplat_object,
                          Handle<JSObject> object,
-                         HInstruction* result,
+                         HInstruction* target,
                          int* offset,
+                         HInstruction* data_target,
+                         int* data_offset,
                          AllocationSiteMode mode);
 
   MUST_USE_RESULT HValue* BuildEmitObjectHeader(
       Handle<JSObject> boilerplat_object,
       HInstruction* target,
+      HInstruction* data_target,
       int object_offset,
       int elements_offset,
       int elements_size);
@@ -1844,14 +1878,18 @@
                                    Handle<JSObject> original_boilerplate_object,
                                    HValue* object_properties,
                                    HInstruction* target,
-                                   int* offset);
+                                   int* offset,
+                                   HInstruction* data_target,
+                                   int* data_offset);
 
   void BuildEmitElements(Handle<FixedArrayBase> elements,
                          Handle<FixedArrayBase> original_elements,
                          ElementsKind kind,
                          HValue* object_elements,
                          HInstruction* target,
-                         int* offset);
+                         int* offset,
+                         HInstruction* data_target,
+                         int* data_offset);
 
   void BuildEmitFixedDoubleArray(Handle<FixedArrayBase> elements,
                                  ElementsKind kind,
@@ -1862,7 +1900,9 @@
                            ElementsKind kind,
                            HValue* object_elements,
                            HInstruction* target,
-                           int* offset);
+                           int* offset,
+                           HInstruction* data_target,
+                           int* data_offset);
 
   void AddCheckPrototypeMaps(Handle<JSObject> holder,
                              Handle<Map> receiver_map);
@@ -1922,6 +1962,10 @@
   void Print();
   void SaveTiming(const char* name, int64_t ticks, unsigned size);
 
+  void IncrementFullCodeGen(int64_t full_code_gen) {
+    full_code_gen_ += full_code_gen;
+  }
+
   void IncrementSubtotals(int64_t create_graph,
                           int64_t optimize_graph,
                           int64_t generate_code) {
@@ -1943,30 +1987,20 @@
 };
 
 
-class HPhase BASE_EMBEDDED {
+class HPhase : public CompilationPhase {
  public:
-  static const char* const kFullCodeGen;
-
-  HPhase(const char* name, Isolate* isolate);
-  HPhase(const char* name, HGraph* graph);
-  HPhase(const char* name, LChunk* chunk);
-  HPhase(const char* name, LAllocator* allocator);
+  HPhase(const char* name, HGraph* graph)
+      : CompilationPhase(name, graph->info()),
+        graph_(graph) { }
   ~HPhase();
 
- private:
-  void Init(Isolate* isolate,
-            const char* name,
-            HGraph* graph,
-            LChunk* chunk,
-            LAllocator* allocator);
+ protected:
+  HGraph* graph() const { return graph_; }
 
-  Isolate* isolate_;
-  const char* name_;
+ private:
   HGraph* graph_;
-  LChunk* chunk_;
-  LAllocator* allocator_;
-  int64_t start_ticks_;
-  unsigned start_allocation_size_;
+
+  DISALLOW_COPY_AND_ASSIGN(HPhase);
 };
 
 
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 0c737ac..b6ef242 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -132,6 +132,7 @@
 
 void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  ASSERT(!target->IsConsString());
   Memory::Object_at(pc_) = target;
   CPU::FlushICache(pc_, sizeof(Address));
   if (mode == UPDATE_WRITE_BARRIER &&
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index bf4ee94..93400ae 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -447,6 +447,8 @@
 
 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
                                              bool is_construct) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
   // Clear the context before we push it when entering the internal frame.
   __ Set(esi, Immediate(0));
 
@@ -1015,427 +1017,6 @@
 }
 
 
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. If the parameter initial_capacity is larger than zero an elements
-// backing store is allocated with this size and filled with the hole values.
-// Otherwise the elements backing store is set to the empty FixedArray.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
-                                 Register array_function,
-                                 Register result,
-                                 Register scratch1,
-                                 Register scratch2,
-                                 Register scratch3,
-                                 Label* gc_required) {
-  const int initial_capacity = JSArray::kPreallocatedArrayElements;
-  STATIC_ASSERT(initial_capacity >= 0);
-
-  __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
-
-  // Allocate the JSArray object together with space for a fixed array with the
-  // requested elements.
-  int size = JSArray::kSize;
-  if (initial_capacity > 0) {
-    size += FixedArray::SizeFor(initial_capacity);
-  }
-  __ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT);
-
-  // Allocated the JSArray. Now initialize the fields except for the elements
-  // array.
-  // result: JSObject
-  // scratch1: initial map
-  // scratch2: start of next object
-  __ mov(FieldOperand(result, JSObject::kMapOffset), scratch1);
-  Factory* factory = masm->isolate()->factory();
-  __ mov(FieldOperand(result, JSArray::kPropertiesOffset),
-         factory->empty_fixed_array());
-  // Field JSArray::kElementsOffset is initialized later.
-  __ mov(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
-
-  // If no storage is requested for the elements array just set the empty
-  // fixed array.
-  if (initial_capacity == 0) {
-    __ mov(FieldOperand(result, JSArray::kElementsOffset),
-           factory->empty_fixed_array());
-    return;
-  }
-
-  // Calculate the location of the elements array and set elements array member
-  // of the JSArray.
-  // result: JSObject
-  // scratch2: start of next object
-  __ lea(scratch1, Operand(result, JSArray::kSize));
-  __ mov(FieldOperand(result, JSArray::kElementsOffset), scratch1);
-
-  // Initialize the FixedArray and fill it with holes. FixedArray length is
-  // stored as a smi.
-  // result: JSObject
-  // scratch1: elements array
-  // scratch2: start of next object
-  __ mov(FieldOperand(scratch1, FixedArray::kMapOffset),
-         factory->fixed_array_map());
-  __ mov(FieldOperand(scratch1, FixedArray::kLengthOffset),
-         Immediate(Smi::FromInt(initial_capacity)));
-
-  // Fill the FixedArray with the hole value. Inline the code if short.
-  // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
-  static const int kLoopUnfoldLimit = 4;
-  if (initial_capacity <= kLoopUnfoldLimit) {
-    // Use a scratch register here to have only one reloc info when unfolding
-    // the loop.
-    __ mov(scratch3, factory->the_hole_value());
-    for (int i = 0; i < initial_capacity; i++) {
-      __ mov(FieldOperand(scratch1,
-                          FixedArray::kHeaderSize + i * kPointerSize),
-             scratch3);
-    }
-  } else {
-    Label loop, entry;
-    __ mov(scratch2, Immediate(initial_capacity));
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ mov(FieldOperand(scratch1,
-                        scratch2,
-                        times_pointer_size,
-                        FixedArray::kHeaderSize),
-           factory->the_hole_value());
-    __ bind(&entry);
-    __ dec(scratch2);
-    __ j(not_sign, &loop);
-  }
-}
-
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array and elements_array_end  (see
-// below for when that is not the case). If the parameter fill_with_holes is
-// true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
-                            Register array_function,  // Array function.
-                            Register array_size,  // As a smi, cannot be 0.
-                            Register result,
-                            Register elements_array,
-                            Register elements_array_end,
-                            Register scratch,
-                            bool fill_with_hole,
-                            Label* gc_required) {
-  ASSERT(scratch.is(edi));  // rep stos destination
-  ASSERT(!fill_with_hole || array_size.is(ecx));  // rep stos count
-  ASSERT(!fill_with_hole || !result.is(eax));  // result is never eax
-
-  __ LoadInitialArrayMap(array_function, scratch,
-                         elements_array, fill_with_hole);
-
-  // Allocate the JSArray object together with space for a FixedArray with the
-  // requested elements.
-  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  __ Allocate(JSArray::kSize + FixedArray::kHeaderSize,
-              times_pointer_size,
-              array_size,
-              REGISTER_VALUE_IS_SMI,
-              result,
-              elements_array_end,
-              scratch,
-              gc_required,
-              TAG_OBJECT);
-
-  // Allocated the JSArray. Now initialize the fields except for the elements
-  // array.
-  // result: JSObject
-  // elements_array: initial map
-  // elements_array_end: start of next object
-  // array_size: size of array (smi)
-  __ mov(FieldOperand(result, JSObject::kMapOffset), elements_array);
-  Factory* factory = masm->isolate()->factory();
-  __ mov(elements_array, factory->empty_fixed_array());
-  __ mov(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
-  // Field JSArray::kElementsOffset is initialized later.
-  __ mov(FieldOperand(result, JSArray::kLengthOffset), array_size);
-
-  // Calculate the location of the elements array and set elements array member
-  // of the JSArray.
-  // result: JSObject
-  // elements_array_end: start of next object
-  // array_size: size of array (smi)
-  __ lea(elements_array, Operand(result, JSArray::kSize));
-  __ mov(FieldOperand(result, JSArray::kElementsOffset), elements_array);
-
-  // Initialize the fixed array. FixedArray length is stored as a smi.
-  // result: JSObject
-  // elements_array: elements array
-  // elements_array_end: start of next object
-  // array_size: size of array (smi)
-  __ mov(FieldOperand(elements_array, FixedArray::kMapOffset),
-         factory->fixed_array_map());
-  // For non-empty JSArrays the length of the FixedArray and the JSArray is the
-  // same.
-  __ mov(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
-
-  // Fill the allocated FixedArray with the hole value if requested.
-  // result: JSObject
-  // elements_array: elements array
-  if (fill_with_hole) {
-    __ SmiUntag(array_size);
-    __ lea(edi, Operand(elements_array,
-                        FixedArray::kHeaderSize - kHeapObjectTag));
-    __ mov(eax, factory->the_hole_value());
-    __ cld();
-    // Do not use rep stos when filling less than kRepStosThreshold
-    // words.
-    const int kRepStosThreshold = 16;
-    Label loop, entry, done;
-    __ cmp(ecx, kRepStosThreshold);
-    __ j(below, &loop);  // Note: ecx > 0.
-    __ rep_stos();
-    __ jmp(&done);
-    __ bind(&loop);
-    __ stos();
-    __ bind(&entry);
-    __ cmp(edi, elements_array_end);
-    __ j(below, &loop);
-    __ bind(&done);
-  }
-}
-
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-//   edi: constructor (built-in Array function)
-//   eax: argc
-//   esp[0]: return address
-//   esp[4]: last argument
-// This function is used for both construct and normal calls of Array. Whether
-// it is a construct call or not is indicated by the construct_call parameter.
-// The only difference between handling a construct call and a normal call is
-// that for a construct call the constructor function in edi needs to be
-// preserved for entering the generic code. In both cases argc in eax needs to
-// be preserved.
-void ArrayNativeCode(MacroAssembler* masm,
-                     bool construct_call,
-                     Label* call_generic_code) {
-  Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call,
-      empty_array, not_empty_array, finish, cant_transition_map, not_double;
-
-  // Push the constructor and argc. No need to tag argc as a smi, as there will
-  // be no garbage collection with this on the stack.
-  int push_count = 0;
-  if (construct_call) {
-    push_count++;
-    __ push(edi);
-  }
-  push_count++;
-  __ push(eax);
-
-  // Check for array construction with zero arguments.
-  __ test(eax, eax);
-  __ j(not_zero, &argc_one_or_more);
-
-  __ bind(&empty_array);
-  // Handle construction of an empty array.
-  AllocateEmptyJSArray(masm,
-                       edi,
-                       eax,
-                       ebx,
-                       ecx,
-                       edi,
-                       &prepare_generic_code_call);
-  __ IncrementCounter(masm->isolate()->counters()->array_function_native(), 1);
-  __ pop(ebx);
-  if (construct_call) {
-    __ pop(edi);
-  }
-  __ ret(kPointerSize);
-
-  // Check for one argument. Bail out if argument is not smi or if it is
-  // negative.
-  __ bind(&argc_one_or_more);
-  __ cmp(eax, 1);
-  __ j(not_equal, &argc_two_or_more);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize));
-  __ test(ecx, ecx);
-  __ j(not_zero, &not_empty_array);
-
-  // The single argument passed is zero, so we jump to the code above used to
-  // handle the case of no arguments passed. To adapt the stack for that we move
-  // the return address and the pushed constructor (if pushed) one stack slot up
-  // thereby removing the passed argument. Argc is also on the stack - at the
-  // bottom - and it needs to be changed from 1 to 0 to have the call into the
-  // runtime system work in case a GC is required.
-  for (int i = push_count; i > 0; i--) {
-    __ mov(eax, Operand(esp, i * kPointerSize));
-    __ mov(Operand(esp, (i + 1) * kPointerSize), eax);
-  }
-  __ Drop(2);  // Drop two stack slots.
-  __ push(Immediate(0));  // Treat this as a call with argc of zero.
-  __ jmp(&empty_array);
-
-  __ bind(&not_empty_array);
-  __ test(ecx, Immediate(kIntptrSignBit | kSmiTagMask));
-  __ j(not_zero, &prepare_generic_code_call);
-
-  // Handle construction of an empty array of a certain size. Get the size from
-  // the stack and bail out if size is too large to actually allocate an
-  // elements array.
-  __ cmp(ecx, JSObject::kInitialMaxFastElementArray << kSmiTagSize);
-  __ j(greater_equal, &prepare_generic_code_call);
-
-  // edx: array_size (smi)
-  // edi: constructor
-  // esp[0]: argc (cannot be 0 here)
-  // esp[4]: constructor (only if construct_call)
-  // esp[8]: return address
-  // esp[C]: argument
-  AllocateJSArray(masm,
-                  edi,
-                  ecx,
-                  ebx,
-                  eax,
-                  edx,
-                  edi,
-                  true,
-                  &prepare_generic_code_call);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->array_function_native(), 1);
-  __ mov(eax, ebx);
-  __ pop(ebx);
-  if (construct_call) {
-    __ pop(edi);
-  }
-  __ ret(2 * kPointerSize);
-
-  // Handle construction of an array from a list of arguments.
-  __ bind(&argc_two_or_more);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ SmiTag(eax);  // Convert argc to a smi.
-  // eax: array_size (smi)
-  // edi: constructor
-  // esp[0] : argc
-  // esp[4]: constructor (only if construct_call)
-  // esp[8] : return address
-  // esp[C] : last argument
-  AllocateJSArray(masm,
-                  edi,
-                  eax,
-                  ebx,
-                  ecx,
-                  edx,
-                  edi,
-                  false,
-                  &prepare_generic_code_call);
-  __ IncrementCounter(counters->array_function_native(), 1);
-  __ push(ebx);
-  __ mov(ebx, Operand(esp, kPointerSize));
-  // ebx: argc
-  // edx: elements_array_end (untagged)
-  // esp[0]: JSArray
-  // esp[4]: argc
-  // esp[8]: constructor (only if construct_call)
-  // esp[12]: return address
-  // esp[16]: last argument
-
-  // Location of the last argument
-  int last_arg_offset = (construct_call ? 4 : 3) * kPointerSize;
-  __ lea(edi, Operand(esp, last_arg_offset));
-
-  // Location of the first array element (Parameter fill_with_holes to
-  // AllocateJSArray is false, so the FixedArray is returned in ecx).
-  __ lea(edx, Operand(ecx, FixedArray::kHeaderSize - kHeapObjectTag));
-
-  Label has_non_smi_element;
-
-  // ebx: argc
-  // edx: location of the first array element
-  // edi: location of the last argument
-  // esp[0]: JSArray
-  // esp[4]: argc
-  // esp[8]: constructor (only if construct_call)
-  // esp[12]: return address
-  // esp[16]: last argument
-  Label loop, entry;
-  __ mov(ecx, ebx);
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
-  if (FLAG_smi_only_arrays) {
-    __ JumpIfNotSmi(eax, &has_non_smi_element);
-  }
-  __ mov(Operand(edx, 0), eax);
-  __ add(edx, Immediate(kPointerSize));
-  __ bind(&entry);
-  __ dec(ecx);
-  __ j(greater_equal, &loop);
-
-  // Remove caller arguments from the stack and return.
-  // ebx: argc
-  // esp[0]: JSArray
-  // esp[4]: argc
-  // esp[8]: constructor (only if construct_call)
-  // esp[12]: return address
-  // esp[16]: last argument
-  __ bind(&finish);
-  __ mov(ecx, Operand(esp, last_arg_offset - kPointerSize));
-  __ pop(eax);
-  __ pop(ebx);
-  __ lea(esp, Operand(esp, ebx, times_pointer_size,
-                      last_arg_offset - kPointerSize));
-  __ jmp(ecx);
-
-  __ bind(&has_non_smi_element);
-  // Double values are handled by the runtime.
-  __ CheckMap(eax,
-              masm->isolate()->factory()->heap_number_map(),
-              &not_double,
-              DONT_DO_SMI_CHECK);
-  __ bind(&cant_transition_map);
-  // Throw away the array that's only been partially constructed.
-  __ pop(eax);
-  __ UndoAllocationInNewSpace(eax);
-  __ jmp(&prepare_generic_code_call);
-
-  __ bind(&not_double);
-  // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
-  __ mov(ebx, Operand(esp, 0));
-  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(
-      FAST_SMI_ELEMENTS,
-      FAST_ELEMENTS,
-      edi,
-      eax,
-      &cant_transition_map);
-  __ mov(FieldOperand(ebx, HeapObject::kMapOffset), edi);
-  __ RecordWriteField(ebx, HeapObject::kMapOffset, edi, eax,
-                      kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
-  // Prepare to re-enter the loop
-  __ lea(edi, Operand(esp, last_arg_offset));
-
-  // Finish the array initialization loop.
-  Label loop2;
-  __ bind(&loop2);
-  __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
-  __ mov(Operand(edx, 0), eax);
-  __ add(edx, Immediate(kPointerSize));
-  __ dec(ecx);
-  __ j(greater_equal, &loop2);
-  __ jmp(&finish);
-
-  // Restore argc and constructor before running the generic code.
-  __ bind(&prepare_generic_code_call);
-  __ pop(eax);
-  if (construct_call) {
-    __ pop(edi);
-  }
-  __ jmp(call_generic_code);
-}
-
-
 void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax : argc
@@ -1459,20 +1040,9 @@
 
   // Run the native code for the InternalArray function called as a normal
   // function.
-  if (FLAG_optimize_constructed_arrays) {
-    // tail call a stub
-    InternalArrayConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else {
-    ArrayNativeCode(masm, false, &generic_array_code);
-
-    // Jump to the generic internal array code in case the specialized code
-    // cannot handle the construction.
-    __ bind(&generic_array_code);
-    Handle<Code> array_code =
-        masm->isolate()->builtins()->InternalArrayCodeGeneric();
-    __ jmp(array_code, RelocInfo::CODE_TARGET);
-  }
+  // Tail call a stub.
+  InternalArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
 }
 
 
@@ -1498,58 +1068,13 @@
   }
 
   // Run the native code for the Array function called as a normal function.
-  if (FLAG_optimize_constructed_arrays) {
-    // tail call a stub
-    Handle<Object> undefined_sentinel(
-        masm->isolate()->heap()->undefined_value(),
-        masm->isolate());
-    __ mov(ebx, Immediate(undefined_sentinel));
-    ArrayConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else {
-    ArrayNativeCode(masm, false, &generic_array_code);
-
-    // Jump to the generic internal array code in case the specialized code
-    // cannot handle the construction.
-    __ bind(&generic_array_code);
-    Handle<Code> array_code =
-        masm->isolate()->builtins()->ArrayCodeGeneric();
-    __ jmp(array_code, RelocInfo::CODE_TARGET);
-  }
-}
-
-
-void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax : argc
-  //  -- ebx : type info cell
-  //  -- edi : constructor
-  //  -- esp[0] : return address
-  //  -- esp[4] : last argument
-  // -----------------------------------
-  if (FLAG_debug_code) {
-    // The array construct code is only set for the global and natives
-    // builtin Array functions which always have maps.
-
-    // Initial map for the builtin Array function should be a map.
-    __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
-    // Will both indicate a NULL and a Smi.
-    __ test(ecx, Immediate(kSmiTagMask));
-    __ Assert(not_zero, "Unexpected initial map for Array function");
-    __ CmpObjectType(ecx, MAP_TYPE, ecx);
-    __ Assert(equal, "Unexpected initial map for Array function");
-  }
-
-  Label generic_constructor;
-  // Run the native code for the Array function called as constructor.
-  ArrayNativeCode(masm, true, &generic_constructor);
-
-  // Jump to the generic construct code in case the specialized code cannot
-  // handle the construction.
-  __ bind(&generic_constructor);
-  Handle<Code> generic_construct_stub =
-      masm->isolate()->builtins()->JSConstructStubGeneric();
-  __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+  // Tail call a stub.
+  Handle<Object> undefined_sentinel(
+      masm->isolate()->heap()->undefined_value(),
+      masm->isolate());
+  __ mov(ebx, Immediate(undefined_sentinel));
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
 }
 
 
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index f3483eb..8cd4685 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -1527,7 +1527,7 @@
       UNREACHABLE();
   }
 
-  if (op_ == Token::MOD && has_fixed_right_arg_) {
+  if (op_ == Token::MOD && encoded_right_arg_.has_value) {
     // It is guaranteed that the value will fit into a Smi, because if it
     // didn't, we wouldn't be here, see BinaryOp_Patch.
     __ cmp(eax, Immediate(Smi::FromInt(fixed_right_arg_value())));
@@ -1669,7 +1669,7 @@
         FloatingPointHelper::CheckSSE2OperandIsInt32(
             masm, &not_int32, xmm1, edi, ecx, xmm2);
         if (op_ == Token::MOD) {
-          if (has_fixed_right_arg_) {
+          if (encoded_right_arg_.has_value) {
             __ cmp(edi, Immediate(fixed_right_arg_value()));
             __ j(not_equal, &right_arg_changed);
           }
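
The `has_fixed_right_arg_` boolean and its separate value field are folded here into a single Maybe-style holder. A sketch of the assumed shape, inferred from the two call sites above (the template itself is an assumption; only the `.has_value` / `.value` accesses appear in the diff):

    // A Maybe<T> pairs a presence flag with its payload, so call sites
    // test encoded_right_arg_.has_value and then read .value.
    template <class T>
    struct Maybe {
      Maybe() : has_value(false) {}
      explicit Maybe(T t) : has_value(true), value(t) {}
      bool has_value;
      T value;
    };
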
@@ -4678,51 +4678,12 @@
 }
 
 
-static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
-  // Cache the called function in a global property cell.  Cache states
-  // are uninitialized, monomorphic (indicated by a JSFunction), and
-  // megamorphic.
-  // ebx : cache cell for call target
-  // edi : the function to call
-  Isolate* isolate = masm->isolate();
-  Label initialize, done;
-
-  // Load the cache state into ecx.
-  __ mov(ecx, FieldOperand(ebx, PropertyCell::kValueOffset));
-
-  // A monomorphic cache hit or an already megamorphic state: invoke the
-  // function without changing the state.
-  __ cmp(ecx, edi);
-  __ j(equal, &done, Label::kNear);
-  __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
-  __ j(equal, &done, Label::kNear);
-
-  // A monomorphic miss (i.e, here the cache is not uninitialized) goes
-  // megamorphic.
-  __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
-  __ j(equal, &initialize, Label::kNear);
-  // MegamorphicSentinel is an immortal immovable object (undefined) so no
-  // write-barrier is needed.
-  __ mov(FieldOperand(ebx, Cell::kValueOffset),
-         Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
-  __ jmp(&done, Label::kNear);
-
-  // An uninitialized cache is patched with the function.
-  __ bind(&initialize);
-  __ mov(FieldOperand(ebx, Cell::kValueOffset), edi);
-  // No need for a write barrier here - cells are rescanned.
-
-  __ bind(&done);
-}
-
-
 static void GenerateRecordCallTarget(MacroAssembler* masm) {
   // Cache the called function in a global property cell.  Cache states
   // are uninitialized, monomorphic (indicated by a JSFunction), and
   // megamorphic.
   // ebx : cache cell for call target
   // edi : the function to call
-  ASSERT(FLAG_optimize_constructed_arrays);
   Isolate* isolate = masm->isolate();
   Label initialize, done, miss, megamorphic, not_array_function;
 
@@ -4739,12 +4700,15 @@
   // Special handling of the Array() function, which caches not only the
   // monomorphic Array function but the initial ElementsKind with special
   // sentinels
-  Handle<Object> terminal_kind_sentinel =
-      TypeFeedbackCells::MonomorphicArraySentinel(isolate,
-                                                  LAST_FAST_ELEMENTS_KIND);
   __ JumpIfNotSmi(ecx, &miss);
-  __ cmp(ecx, Immediate(terminal_kind_sentinel));
-  __ j(above, &miss);
+  if (FLAG_debug_code) {
+    Handle<Object> terminal_kind_sentinel =
+        TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
+                                                    LAST_FAST_ELEMENTS_KIND);
+    __ cmp(ecx, Immediate(terminal_kind_sentinel));
+    __ Assert(less_equal, "Array function sentinel is not an ElementsKind");
+  }
+
   // Load the global or builtins object from the current context
   __ LoadGlobalContext(ecx);
   // Make sure the function is the Array() function
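
Read as plain logic, the feedback-cell protocol that GenerateRecordCallTarget emits is a three-state machine. A hedged sketch, where MegamorphicSentinel() and UninitializedSentinel() stand in for the TypeFeedbackCells calls used above:

    // Uninitialized -> monomorphic on the first callee; monomorphic ->
    // megamorphic on a miss; megamorphic is terminal.
    void RecordCallTarget(Cell* cell, Object* callee) {
      Object* state = cell->value();
      if (state == callee) return;                 // Monomorphic hit.
      if (state == MegamorphicSentinel()) return;  // Already generic.
      if (state == UninitializedSentinel()) {
        cell->set_value(callee);                   // Go monomorphic.
      } else {
        cell->set_value(MegamorphicSentinel());    // Miss: go megamorphic.
      }
    }
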
@@ -4824,11 +4788,7 @@
   __ j(not_equal, &slow);
 
   if (RecordCallTarget()) {
-    if (FLAG_optimize_constructed_arrays) {
-      GenerateRecordCallTarget(masm);
-    } else {
-      GenerateRecordCallTargetNoArray(masm);
-    }
+    GenerateRecordCallTarget(masm);
   }
 
   // Fast-case: Just invoke the function.
@@ -4901,15 +4861,11 @@
   __ j(not_equal, &slow);
 
   if (RecordCallTarget()) {
-    if (FLAG_optimize_constructed_arrays) {
-      GenerateRecordCallTarget(masm);
-    } else {
-      GenerateRecordCallTargetNoArray(masm);
-    }
+    GenerateRecordCallTarget(masm);
   }
 
   // Jump to the function-specific construct stub.
-  Register jmp_reg = FLAG_optimize_constructed_arrays ? ecx : ebx;
+  Register jmp_reg = ecx;
   __ mov(jmp_reg, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   __ mov(jmp_reg, FieldOperand(jmp_reg,
                                SharedFunctionInfo::kConstructStubOffset));
@@ -4955,9 +4911,7 @@
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
   // It is important that the store buffer overflow stubs are generated first.
   RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
-  if (FLAG_optimize_constructed_arrays) {
-    ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
-  }
+  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
 }
 
 
@@ -5050,11 +5004,6 @@
     Label okay;
     __ cmp(eax, masm->isolate()->factory()->the_hole_value());
     __ j(not_equal, &okay, Label::kNear);
-    // TODO(wingo): Currently SuspendJSGeneratorObject returns the hole.  Change
-    // to return another sentinel like a harmony symbol.
-    __ cmp(ebx, Immediate(ExternalReference(
-        Runtime::kSuspendJSGeneratorObject, masm->isolate())));
-    __ j(equal, &okay, Label::kNear);
     __ int3();
     __ bind(&okay);
   }
@@ -5131,6 +5080,8 @@
   // esi: current context (C callee-saved)
   // edi: JS function of the caller (C callee-saved)
 
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
   // NOTE: Invocations of builtins may return failure objects instead
   // of a proper result. The builtin entry handles this by performing
   // a garbage collection and retrying the builtin (twice).
@@ -5204,6 +5155,8 @@
   Label invoke, handler_entry, exit;
   Label not_outermost_js, not_outermost_js_2;
 
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
   // Set up frame.
   __ push(ebp);
   __ mov(ebp, esp);
@@ -6907,9 +6860,13 @@
   __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
   __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
   STATIC_ASSERT(kInternalizedTag != 0);
-  __ and_(tmp1, tmp2);
-  __ test(tmp1, Immediate(kIsInternalizedMask));
-  __ j(zero, &miss, Label::kNear);
+  __ and_(tmp1, Immediate(kIsNotStringMask | kIsInternalizedMask));
+  __ cmpb(tmp1, kInternalizedTag | kStringTag);
+  __ j(not_equal, &miss, Label::kNear);
+
+  __ and_(tmp2, Immediate(kIsNotStringMask | kIsInternalizedMask));
+  __ cmpb(tmp2, kInternalizedTag | kStringTag);
+  __ j(not_equal, &miss, Label::kNear);
 
   // Internalized strings are compared by identity.
   Label done;
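
Each operand is now tested on its own against a combined string-plus-internalized pattern instead of and-ing the two instance types together. In C terms, the per-operand predicate the new sequence implements (constants as named in the diff; an illustration, not V8 source):

    static inline bool IsInternalizedString(uint8_t instance_type) {
      return (instance_type & (kIsNotStringMask | kIsInternalizedMask)) ==
             (kStringTag | kInternalizedTag);
    }
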
@@ -6954,19 +6911,8 @@
   __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
   __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
 
-  Label succeed1;
-  __ test(tmp1, Immediate(kIsInternalizedMask));
-  __ j(not_zero, &succeed1);
-  __ cmpb(tmp1, static_cast<uint8_t>(SYMBOL_TYPE));
-  __ j(not_equal, &miss);
-  __ bind(&succeed1);
-
-  Label succeed2;
-  __ test(tmp2, Immediate(kIsInternalizedMask));
-  __ j(not_zero, &succeed2);
-  __ cmpb(tmp2, static_cast<uint8_t>(SYMBOL_TYPE));
-  __ j(not_equal, &miss);
-  __ bind(&succeed2);
+  __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
+  __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
 
   // Unique names are compared by identity.
   Label done;
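
JumpIfNotUniqueName folds the open-coded test that the removed lines repeated at each call site. Assuming the same instance-type encoding as above, the predicate it enforces is:

    // A unique name is an internalized string or a Symbol; the helper
    // branches to the miss label when neither holds.
    static inline bool IsUniqueName(uint8_t instance_type) {
      return (instance_type & kIsInternalizedMask) != 0 ||
             instance_type == SYMBOL_TYPE;
    }
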
@@ -7031,7 +6977,8 @@
 
   // Check that both strings are internalized. If they are, we're done
   // because we already know they are not identical.  But in the case of
-  // non-equality compare, we still need to determine the order.
+  // non-equality compare, we still need to determine the order. We
+  // also know they are both strings.
   if (equality) {
     Label do_compare;
     STATIC_ASSERT(kInternalizedTag != 0);
@@ -7190,12 +7137,8 @@
 
     // Check if the entry name is not a unique name.
     __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
-    __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
-              kIsInternalizedMask);
-    __ j(not_zero, &good);
-    __ cmpb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
-            static_cast<uint8_t>(SYMBOL_TYPE));
-    __ j(not_equal, miss);
+    __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
+                           miss);
     __ bind(&good);
   }
 
@@ -7328,15 +7271,9 @@
       // key we are looking for.
 
       // Check if the entry name is not a unique name.
-      Label cont;
       __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
-      __ test_b(FieldOperand(scratch, Map::kInstanceTypeOffset),
-                kIsInternalizedMask);
-      __ j(not_zero, &cont);
-      __ cmpb(FieldOperand(scratch, Map::kInstanceTypeOffset),
-              static_cast<uint8_t>(SYMBOL_TYPE));
-      __ j(not_equal, &maybe_in_dictionary);
-      __ bind(&cont);
+      __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
+                             &maybe_in_dictionary);
     }
   }
 
@@ -7761,7 +7698,11 @@
 
 
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
-  if (entry_hook_ != NULL) {
+  if (masm->isolate()->function_entry_hook() != NULL) {
+    // It's always safe to call the entry hook stub, as the hook itself
+    // is not allowed to call back to V8.
+    AllowStubCallsScope allow_stub_calls(masm, true);
+
     ProfileEntryHookStub stub;
     masm->CallStub(&stub);
   }
@@ -7769,9 +7710,11 @@
 
 
 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
-  // Ecx is the only volatile register we must save.
-  const int kNumSavedRegisters = 1;
+  // Save volatile registers.
+  const int kNumSavedRegisters = 3;
+  __ push(eax);
   __ push(ecx);
+  __ push(edx);
 
   // Calculate and push the original stack pointer.
   __ lea(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize));
@@ -7784,12 +7727,16 @@
   __ push(eax);
 
   // Call the entry hook.
-  int32_t hook_location = reinterpret_cast<int32_t>(&entry_hook_);
-  __ call(Operand(hook_location, RelocInfo::NONE32));
+  ASSERT(masm->isolate()->function_entry_hook() != NULL);
+  __ call(FUNCTION_ADDR(masm->isolate()->function_entry_hook()),
+          RelocInfo::RUNTIME_ENTRY);
   __ add(esp, Immediate(2 * kPointerSize));
 
-  // Restore ecx.
+  // Restore the saved volatile registers.
+  __ pop(edx);
   __ pop(ecx);
+  __ pop(eax);
+
   __ ret(0);
 }
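
With the hook stored on the isolate rather than in a static, embedders install it per isolate. A minimal usage sketch, assuming the SetFunctionEntryHook signature of this API generation; the hook must be installed before the isolate runs code so its stubs are built with the hook embedded:

    #include "v8.h"

    // The hook must not call back into V8; that invariant is what makes
    // calling the stub from generated code safe (see MaybeCallEntryHook).
    static void MyEntryHook(uintptr_t function, uintptr_t return_addr_location) {
      // Record `function` for profiling; no V8 calls allowed here.
    }

    void InstallEntryHook(v8::Isolate* isolate) {
      v8::V8::SetFunctionEntryHook(isolate, &MyEntryHook);  // Assumed API.
    }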
 
@@ -7847,6 +7794,10 @@
   __ cmp(ebx, Immediate(undefined_sentinel));
   __ j(equal, &normal_sequence);
 
+  // The type cell may have gone megamorphic; if so, don't overwrite it.
+  __ mov(ecx, FieldOperand(ebx, kPointerSize));
+  __ JumpIfNotSmi(ecx, &normal_sequence);
+
   // Save the resulting elements kind in type info
   __ SmiTag(edx);
   __ mov(FieldOperand(ebx, kPointerSize), edx);
@@ -7876,10 +7827,10 @@
       TERMINAL_FAST_ELEMENTS_KIND);
   for (int i = 0; i <= to_index; ++i) {
     ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
-    T stub(kind, false);
+    T stub(kind);
     stub.GetCode(isolate)->set_is_pregenerated(true);
     if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
-      T stub1(kind, true);
+      T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
       stub1.GetCode(isolate)->set_is_pregenerated(true);
     }
   }
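
The two extra constructor arguments come from two small enums; their assumed definitions, of which only the enumerator names actually appear in this patch:

    enum ContextCheckMode {
      CONTEXT_CHECK_REQUIRED,
      CONTEXT_CHECK_NOT_REQUIRED
    };
    enum AllocationSiteOverrideMode {
      DONT_OVERRIDE,
      DISABLE_ALLOCATION_SITES
    };
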
@@ -7945,52 +7896,39 @@
     __ bind(&okay_here);
   }
 
-  if (FLAG_optimize_constructed_arrays) {
-    Label no_info, switch_ready;
-    // Get the elements kind and case on that.
-    __ cmp(ebx, Immediate(undefined_sentinel));
-    __ j(equal, &no_info);
-    __ mov(edx, FieldOperand(ebx, Cell::kValueOffset));
-    __ JumpIfNotSmi(edx, &no_info);
-    __ SmiUntag(edx);
-    __ jmp(&switch_ready);
-    __ bind(&no_info);
-    __ mov(edx, Immediate(GetInitialFastElementsKind()));
-    __ bind(&switch_ready);
+  Label no_info, switch_ready;
+  // Get the elements kind and switch on it.
+  __ cmp(ebx, Immediate(undefined_sentinel));
+  __ j(equal, &no_info);
+  __ mov(edx, FieldOperand(ebx, Cell::kValueOffset));
+  __ JumpIfNotSmi(edx, &no_info);
+  __ SmiUntag(edx);
+  __ jmp(&switch_ready);
+  __ bind(&no_info);
+  __ mov(edx, Immediate(GetInitialFastElementsKind()));
+  __ bind(&switch_ready);
 
-    if (argument_count_ == ANY) {
-      Label not_zero_case, not_one_case;
-      __ test(eax, eax);
-      __ j(not_zero, &not_zero_case);
-      CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+  if (argument_count_ == ANY) {
+    Label not_zero_case, not_one_case;
+    __ test(eax, eax);
+    __ j(not_zero, &not_zero_case);
+    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
 
-      __ bind(&not_zero_case);
-      __ cmp(eax, 1);
-      __ j(greater, &not_one_case);
-      CreateArrayDispatchOneArgument(masm);
+    __ bind(&not_zero_case);
+    __ cmp(eax, 1);
+    __ j(greater, &not_one_case);
+    CreateArrayDispatchOneArgument(masm);
 
-      __ bind(&not_one_case);
-      CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
-    } else if (argument_count_ == NONE) {
-      CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
-    } else if (argument_count_ == ONE) {
-      CreateArrayDispatchOneArgument(masm);
-    } else if (argument_count_ == MORE_THAN_ONE) {
-      CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
-    } else {
-      UNREACHABLE();
-    }
+    __ bind(&not_one_case);
+    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
+  } else if (argument_count_ == NONE) {
+    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+  } else if (argument_count_ == ONE) {
+    CreateArrayDispatchOneArgument(masm);
+  } else if (argument_count_ == MORE_THAN_ONE) {
+    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
   } else {
-    Label generic_constructor;
-    // Run the native code for the Array function called as constructor.
-    ArrayNativeCode(masm, true, &generic_constructor);
-
-    // Jump to the generic construct code in case the specialized code cannot
-    // handle the construction.
-    __ bind(&generic_constructor);
-    Handle<Code> generic_construct_stub =
-        masm->isolate()->builtins()->JSConstructStubGeneric();
-    __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+    UNREACHABLE();
   }
 }
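
With the FLAG_optimize_constructed_arrays guard gone, the stub always performs the same dispatch: pick the ElementsKind from the feedback cell, fall back to the initial kind when there is none, then branch on the argument count. As pseudocode (all helper names here are hypothetical):

    ElementsKind kind = CellHoldsSmiKind(cell)       // Hypothetical helper.
                            ? KindFromCell(cell)     // Feedback present.
                            : GetInitialFastElementsKind();
    switch (argc) {
      case 0:  TailCall(ArrayNoArgumentConstructorStub(kind));     break;
      case 1:  TailCall(ArraySingleArgumentConstructorStub(kind)); break;
      default: TailCall(ArrayNArgumentsConstructorStub(kind));     break;
    }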
 
@@ -8053,46 +7991,33 @@
     __ Assert(equal, "Unexpected initial map for Array function");
   }
 
-  if (FLAG_optimize_constructed_arrays) {
-    // Figure out the right elements kind
-    __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+  // Figure out the right elements kind
+  __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
 
-    // Load the map's "bit field 2" into |result|. We only need the first byte,
-    // but the following masking takes care of that anyway.
-    __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
-    // Retrieve elements_kind from bit field 2.
-    __ and_(ecx, Map::kElementsKindMask);
-    __ shr(ecx, Map::kElementsKindShift);
+  // Load the map's "bit field 2" into ecx. We only need the first byte,
+  // but the following masking takes care of that anyway.
+  __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
+  // Retrieve elements_kind from bit field 2.
+  __ and_(ecx, Map::kElementsKindMask);
+  __ shr(ecx, Map::kElementsKindShift);
 
-    if (FLAG_debug_code) {
-      Label done;
-      __ cmp(ecx, Immediate(FAST_ELEMENTS));
-      __ j(equal, &done);
-      __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
-      __ Assert(equal,
-          "Invalid ElementsKind for InternalArray or InternalPackedArray");
-      __ bind(&done);
-    }
-
-    Label fast_elements_case;
+  if (FLAG_debug_code) {
+    Label done;
     __ cmp(ecx, Immediate(FAST_ELEMENTS));
-    __ j(equal, &fast_elements_case);
-    GenerateCase(masm, FAST_HOLEY_ELEMENTS);
-
-    __ bind(&fast_elements_case);
-    GenerateCase(masm, FAST_ELEMENTS);
-  } else {
-    Label generic_constructor;
-    // Run the native code for the Array function called as constructor.
-    ArrayNativeCode(masm, true, &generic_constructor);
-
-    // Jump to the generic construct code in case the specialized code cannot
-    // handle the construction.
-    __ bind(&generic_constructor);
-    Handle<Code> generic_construct_stub =
-        masm->isolate()->builtins()->JSConstructStubGeneric();
-    __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+    __ j(equal, &done);
+    __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
+    __ Assert(equal,
+              "Invalid ElementsKind for InternalArray or InternalPackedArray");
+    __ bind(&done);
   }
+
+  Label fast_elements_case;
+  __ cmp(ecx, Immediate(FAST_ELEMENTS));
+  __ j(equal, &fast_elements_case);
+  GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+  __ bind(&fast_elements_case);
+  GenerateCase(masm, FAST_ELEMENTS);
 }
 
 
diff --git a/src/ia32/frames-ia32.cc b/src/ia32/frames-ia32.cc
index ea19e9f..61d6876 100644
--- a/src/ia32/frames-ia32.cc
+++ b/src/ia32/frames-ia32.cc
@@ -32,17 +32,12 @@
 #include "assembler.h"
 #include "assembler-ia32.h"
 #include "assembler-ia32-inl.h"
-#include "frames-inl.h"
+#include "frames.h"
 
 namespace v8 {
 namespace internal {
 
 
-Address ExitFrame::ComputeStackPointer(Address fp) {
-  return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
-
-
 Register JavaScriptFrame::fp_register() { return ebp; }
 Register JavaScriptFrame::context_register() { return esi; }
 
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 54e9eaf..d04fdd4 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -1639,10 +1639,10 @@
         ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
         // Fall through.
       case ObjectLiteral::Property::COMPUTED:
-        if (key->handle()->IsInternalizedString()) {
+        if (key->value()->IsInternalizedString()) {
           if (property->emit_store()) {
             VisitForAccumulatorValue(value);
-            __ mov(ecx, Immediate(key->handle()));
+            __ mov(ecx, Immediate(key->value()));
             __ mov(edx, Operand(esp, 0));
             Handle<Code> ic = is_classic_mode()
                 ? isolate()->builtins()->StoreIC_Initialize()
@@ -1782,10 +1782,7 @@
     Expression* subexpr = subexprs->at(i);
     // If the subexpression is a literal or a simple materialized literal it
     // is already set in the cloned array.
-    if (subexpr->AsLiteral() != NULL ||
-        CompileTimeValue::IsCompileTimeValue(subexpr)) {
-      continue;
-    }
+    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
 
     if (!result_saved) {
       __ push(eax);  // array literal.
@@ -1953,14 +1950,30 @@
       __ push(result_register());
       // Fall through.
     case Yield::INITIAL: {
-      VisitForStackValue(expr->generator_object());
+      Label suspend, continuation, post_runtime, resume;
+
+      __ jmp(&suspend);
+
+      __ bind(&continuation);
+      __ jmp(&resume);
+
+      __ bind(&suspend);
+      VisitForAccumulatorValue(expr->generator_object());
+      ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+      __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
+             Immediate(Smi::FromInt(continuation.pos())));
+      __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
+      __ mov(ecx, esi);
+      __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
+                          kDontSaveFPRegs);
+      __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
+      __ cmp(esp, ebx);
+      __ j(equal, &post_runtime);
+      __ push(eax);  // generator object
       __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ mov(context_register(),
              Operand(ebp, StandardFrameConstants::kContextOffset));
-
-      Label resume;
-      __ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex);
-      __ j(not_equal, &resume);
+      __ bind(&post_runtime);
       __ pop(result_register());
       EmitReturnSequence();
 
@@ -1988,7 +2001,8 @@
       // [sp + 1 * kPointerSize] iter
       // [sp + 0 * kPointerSize] g
 
-      Label l_catch, l_try, l_resume, l_next, l_call, l_loop;
+      Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+      Label l_next, l_call, l_loop;
       // Initial send value is undefined.
       __ mov(eax, isolate()->factory()->undefined_value());
       __ jmp(&l_next);
@@ -2010,12 +2024,23 @@
       __ PushTryHandler(StackHandler::CATCH, expr->index());
       const int handler_size = StackHandlerConstants::kSize;
       __ push(eax);                                      // result
-      __ push(Operand(esp, (0 + 1) * kPointerSize + handler_size));  // g
+      __ jmp(&l_suspend);
+      __ bind(&l_continuation);
+      __ jmp(&l_resume);
+      __ bind(&l_suspend);
+      const int generator_object_depth = kPointerSize + handler_size;
+      __ mov(eax, Operand(esp, generator_object_depth));
+      __ push(eax);                                      // g
+      ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+      __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
+             Immediate(Smi::FromInt(l_continuation.pos())));
+      __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
+      __ mov(ecx, esi);
+      __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
+                          kDontSaveFPRegs);
       __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ mov(context_register(),
              Operand(ebp, StandardFrameConstants::kContextOffset));
-      __ CompareRoot(eax, Heap::kTheHoleValueRootIndex);
-      __ j(not_equal, &l_resume);
       __ pop(eax);                                       // result
       EmitReturnSequence();
       __ bind(&l_resume);                                // received in eax
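
Both yield paths now record the resume state inline before calling into the runtime. What gets stored on the generator object, sketched with hypothetical accessors (the field offsets are the JSGeneratorObject ones used above):

    void SuspendAt(JSGeneratorObject* gen, int continuation_pos, Context* cx) {
      gen->set_continuation(continuation_pos);  // Smi-tagged label position.
      gen->set_context(cx);                     // Needs a write barrier.
      // Runtime::kSuspendJSGeneratorObject then captures the operand stack;
      // resuming jumps back to the recorded continuation.
    }
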
@@ -2091,7 +2116,7 @@
   __ mov(ecx, isolate()->factory()->the_hole_value());
   Label push_argument_holes, push_frame;
   __ bind(&push_argument_holes);
-  __ sub(edx, Immediate(1));
+  __ sub(edx, Immediate(Smi::FromInt(1)));
   __ j(carry, &push_frame);
   __ push(ecx);
   __ jmp(&push_argument_holes);
@@ -2193,8 +2218,8 @@
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
-  ASSERT(!key->handle()->IsSmi());
-  __ mov(ecx, Immediate(key->handle()));
+  ASSERT(!key->value()->IsSmi());
+  __ mov(ecx, Immediate(key->value()));
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
   CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
 }
@@ -2348,7 +2373,7 @@
       VisitForAccumulatorValue(prop->obj());
       __ mov(edx, eax);
       __ pop(eax);  // Restore value.
-      __ mov(ecx, prop->key()->AsLiteral()->handle());
+      __ mov(ecx, prop->key()->AsLiteral()->value());
       Handle<Code> ic = is_classic_mode()
           ? isolate()->builtins()->StoreIC_Initialize()
           : isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2475,7 +2500,7 @@
 
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
-  __ mov(ecx, prop->key()->AsLiteral()->handle());
+  __ mov(ecx, prop->key()->AsLiteral()->value());
   __ pop(edx);
   Handle<Code> ic = is_classic_mode()
       ? isolate()->builtins()->StoreIC_Initialize()
@@ -2740,7 +2765,7 @@
     }
     if (property->key()->IsPropertyName()) {
       EmitCallWithIC(expr,
-                     property->key()->AsLiteral()->handle(),
+                     property->key()->AsLiteral()->value(),
                      RelocInfo::CODE_TARGET);
     } else {
       EmitKeyedCallWithIC(expr, property->key());
@@ -3358,7 +3383,7 @@
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   ASSERT_NE(NULL, args->at(1)->AsLiteral());
-  Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+  Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
 
@@ -3787,7 +3812,7 @@
   ASSERT_EQ(2, args->length());
 
   ASSERT_NE(NULL, args->at(0)->AsLiteral());
-  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
 
   Handle<FixedArray> jsfunction_result_caches(
       isolate()->native_context()->jsfunction_result_caches());
@@ -4499,7 +4524,7 @@
       }
       break;
     case NAMED_PROPERTY: {
-      __ mov(ecx, prop->key()->AsLiteral()->handle());
+      __ mov(ecx, prop->key()->AsLiteral()->value());
       __ pop(edx);
       Handle<Code> ic = is_classic_mode()
           ? isolate()->builtins()->StoreIC_Initialize()
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 2e27097..10d6c86 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -317,7 +317,8 @@
   __ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
   __ j(zero, index_string);
 
-  // Is the string internalized?
+  // Is the string internalized? We already know it's a string, so a single
+  // bit test is enough.
   STATIC_ASSERT(kInternalizedTag != 0);
   __ test_b(FieldOperand(map, Map::kInstanceTypeOffset), kIsInternalizedMask);
   __ j(zero, not_unique);
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 2240d10..694ae13 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -74,7 +74,7 @@
 #define __ masm()->
 
 bool LCodeGen::GenerateCode() {
-  HPhase phase("Z_Code generation", chunk());
+  LPhase phase("Z_Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;
 
@@ -109,7 +109,7 @@
   if (!info()->IsStub()) {
     Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
   }
-  info()->CommitDependentMaps(code);
+  info()->CommitDependencies(code);
 }
 
 
@@ -657,27 +657,6 @@
 
   for (int i = 0; i < translation_size; ++i) {
     LOperand* value = environment->values()->at(i);
-    // spilled_registers_ and spilled_double_registers_ are either
-    // both NULL or both set.
-    if (environment->spilled_registers() != NULL && value != NULL) {
-      if (value->IsRegister() &&
-          environment->spilled_registers()[value->index()] != NULL) {
-        translation->MarkDuplicate();
-        AddToTranslation(translation,
-                         environment->spilled_registers()[value->index()],
-                         environment->HasTaggedValueAt(i),
-                         environment->HasUint32ValueAt(i));
-      } else if (
-          value->IsDoubleRegister() &&
-          environment->spilled_double_registers()[value->index()] != NULL) {
-        translation->MarkDuplicate();
-        AddToTranslation(
-            translation,
-            environment->spilled_double_registers()[value->index()],
-            false,
-            false);
-      }
-    }
 
     // TODO(mstarzinger): Introduce marker operands to indicate that this value
     // is not present and must be reconstructed from the deoptimizer. Currently
@@ -687,12 +666,6 @@
       translation->BeginArgumentsObject(arguments_count);
       for (int i = 0; i < arguments_count; ++i) {
         LOperand* value = environment->values()->at(translation_size + i);
-        ASSERT(environment->spilled_registers() == NULL ||
-               !value->IsRegister() ||
-               environment->spilled_registers()[value->index()] == NULL);
-        ASSERT(environment->spilled_registers() == NULL ||
-               !value->IsDoubleRegister() ||
-               environment->spilled_double_registers()[value->index()] == NULL);
         AddToTranslation(translation,
                          value,
                          environment->HasTaggedValueAt(translation_size + i),
@@ -1189,7 +1162,8 @@
 
 
 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
-  // Nothing to do.
+  // Record the address of the first unknown OSR value as the place to enter.
+  if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
 }
 
 
@@ -1223,12 +1197,12 @@
     __ and_(left_reg, divisor - 1);
     __ bind(&done);
 
-  } else if (hmod->has_fixed_right_arg()) {
+  } else if (hmod->fixed_right_arg().has_value) {
     Register left_reg = ToRegister(instr->left());
     ASSERT(left_reg.is(ToRegister(instr->result())));
     Register right_reg = ToRegister(instr->right());
 
-    int32_t divisor = hmod->fixed_right_arg_value();
+    int32_t divisor = hmod->fixed_right_arg().value;
     ASSERT(IsPowerOf2(divisor));
 
     // Check if our assumption of a fixed right operand still holds.
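
The guard above protects a power-of-two fast path. The arithmetic it stands in for, ignoring the INT_MIN edge case (illustrative only; the generated code additionally deopts when a negative-zero result must be distinguished):

    // x % divisor keeps the sign of the dividend, so for divisor == 1 << k
    // masking the magnitude suffices.
    static inline int32_t FastPowerOfTwoMod(int32_t x, int32_t divisor) {
      int32_t m = (x < 0 ? -x : x) & (divisor - 1);
      return x < 0 ? -m : m;
    }
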
@@ -1836,8 +1810,11 @@
   ASSERT(input.is(result));
 
   Label done;
-  // If the object is a smi return the object.
-  __ JumpIfSmi(input, &done, Label::kNear);
+
+  if (!instr->hydrogen()->value()->IsHeapObject()) {
+    // If the object is a smi return the object.
+    __ JumpIfSmi(input, &done, Label::kNear);
+  }
 
   // If the object is not a value type, return the object.
   __ CmpObjectType(input, JS_VALUE_TYPE, map);
@@ -2087,10 +2064,12 @@
 }
 
 
-void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
+  int right_block = instr->FalseDestination(chunk_);
+  int left_block = instr->TrueDestination(chunk_);
+
   int next_block = GetNextEmittedBlock();
-  right_block = chunk_->LookupDestination(right_block);
-  left_block = chunk_->LookupDestination(left_block);
 
   if (right_block == left_block) {
     EmitGoto(left_block);
@@ -2106,22 +2085,19 @@
 
 
 void LCodeGen::DoBranch(LBranch* instr) {
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
   Representation r = instr->hydrogen()->value()->representation();
   if (r.IsSmiOrInteger32()) {
     ASSERT(!info()->IsStub());
     Register reg = ToRegister(instr->value());
     __ test(reg, Operand(reg));
-    EmitBranch(true_block, false_block, not_zero);
+    EmitBranch(instr, not_zero);
   } else if (r.IsDouble()) {
     ASSERT(!info()->IsStub());
     CpuFeatureScope scope(masm(), SSE2);
     XMMRegister reg = ToDoubleRegister(instr->value());
     __ xorps(xmm0, xmm0);
     __ ucomisd(reg, xmm0);
-    EmitBranch(true_block, false_block, not_equal);
+    EmitBranch(instr, not_equal);
   } else {
     ASSERT(r.IsTagged());
     Register reg = ToRegister(instr->value());
@@ -2129,43 +2105,52 @@
     if (type.IsBoolean()) {
       ASSERT(!info()->IsStub());
       __ cmp(reg, factory()->true_value());
-      EmitBranch(true_block, false_block, equal);
+      EmitBranch(instr, equal);
     } else if (type.IsSmi()) {
       ASSERT(!info()->IsStub());
       __ test(reg, Operand(reg));
-      EmitBranch(true_block, false_block, not_equal);
+      EmitBranch(instr, not_equal);
+    } else if (type.IsJSArray()) {
+      ASSERT(!info()->IsStub());
+      EmitBranch(instr, no_condition);
+    } else if (type.IsHeapNumber()) {
+      ASSERT(!info()->IsStub());
+      CpuFeatureScope scope(masm(), SSE2);
+      __ xorps(xmm0, xmm0);
+      __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+      EmitBranch(instr, not_equal);
+    } else if (type.IsString()) {
+      ASSERT(!info()->IsStub());
+      __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+      EmitBranch(instr, not_equal);
     } else {
-      Label* true_label = chunk_->GetAssemblyLabel(true_block);
-      Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
       ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
-      // Avoid deopts in the case where we've never executed this path before.
-      if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
+      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
 
       if (expected.Contains(ToBooleanStub::UNDEFINED)) {
         // undefined -> false.
         __ cmp(reg, factory()->undefined_value());
-        __ j(equal, false_label);
+        __ j(equal, instr->FalseLabel(chunk_));
       }
       if (expected.Contains(ToBooleanStub::BOOLEAN)) {
         // true -> true.
         __ cmp(reg, factory()->true_value());
-        __ j(equal, true_label);
+        __ j(equal, instr->TrueLabel(chunk_));
         // false -> false.
         __ cmp(reg, factory()->false_value());
-        __ j(equal, false_label);
+        __ j(equal, instr->FalseLabel(chunk_));
       }
       if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
         // 'null' -> false.
         __ cmp(reg, factory()->null_value());
-        __ j(equal, false_label);
+        __ j(equal, instr->FalseLabel(chunk_));
       }
 
       if (expected.Contains(ToBooleanStub::SMI)) {
         // Smis: 0 -> false, all other -> true.
         __ test(reg, Operand(reg));
-        __ j(equal, false_label);
-        __ JumpIfSmi(reg, true_label);
+        __ j(equal, instr->FalseLabel(chunk_));
+        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
       } else if (expected.NeedsMap()) {
         // If we need a map later and have a Smi -> deopt.
         __ test(reg, Immediate(kSmiTagMask));
@@ -2182,14 +2167,14 @@
           // Undetectable -> false.
           __ test_b(FieldOperand(map, Map::kBitFieldOffset),
                     1 << Map::kIsUndetectable);
-          __ j(not_zero, false_label);
+          __ j(not_zero, instr->FalseLabel(chunk_));
         }
       }
 
       if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
         // spec object -> true.
         __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
-        __ j(above_equal, true_label);
+        __ j(above_equal, instr->TrueLabel(chunk_));
       }
 
       if (expected.Contains(ToBooleanStub::STRING)) {
@@ -2198,15 +2183,15 @@
         __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
         __ j(above_equal, &not_string, Label::kNear);
         __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
-        __ j(not_zero, true_label);
-        __ jmp(false_label);
+        __ j(not_zero, instr->TrueLabel(chunk_));
+        __ jmp(instr->FalseLabel(chunk_));
         __ bind(&not_string);
       }
 
       if (expected.Contains(ToBooleanStub::SYMBOL)) {
         // Symbol value -> true.
         __ CmpInstanceType(map, SYMBOL_TYPE);
-        __ j(equal, true_label);
+        __ j(equal, instr->TrueLabel(chunk_));
       }
 
       if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
@@ -2224,13 +2209,16 @@
           __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
           __ FCmp();
         }
-        __ j(zero, false_label);
-        __ jmp(true_label);
+        __ j(zero, instr->FalseLabel(chunk_));
+        __ jmp(instr->TrueLabel(chunk_));
         __ bind(&not_heap_number);
       }
 
-      // We've seen something for the first time -> deopt.
-      DeoptimizeIf(no_condition, instr->environment());
+      if (!expected.IsGeneric()) {
+        // We've seen something for the first time -> deopt.
+        // This can only happen if we are not generic already.
+        DeoptimizeIf(no_condition, instr->environment());
+      }
     }
   }
 }
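
The branch sequence open-codes ES ToBoolean, emitting only the cases the type feedback says are possible and deopting otherwise. The full predicate, written as plain logic with illustrative accessors:

    bool ToBoolean(Object* v) {
      if (v->IsUndefined() || v->IsNull() || v->IsFalse()) return false;
      if (v->IsTrue()) return true;
      if (v->IsSmi()) return Smi::cast(v)->value() != 0;
      if (HeapObject::cast(v)->map()->is_undetectable()) return false;
      if (v->IsString()) return String::cast(v)->length() != 0;
      if (v->IsHeapNumber()) {
        double d = HeapNumber::cast(v)->value();
        return d == d && d != 0.0;  // False for NaN and +/-0.
      }
      return true;  // Spec objects and symbols are truthy.
    }
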
@@ -2238,7 +2226,7 @@
 
 void LCodeGen::EmitGoto(int block) {
   if (!IsNextEmittedBlock(block)) {
-    __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
+    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
   }
 }
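
EmitBranch can become a template because every *AndBranch instruction now exposes the same small interface. Its assumed shape, listing only the members this patch calls:

    struct BranchTargets {  // Illustrative stand-in for the shared interface.
      int TrueDestination(LPlatformChunk* chunk);
      int FalseDestination(LPlatformChunk* chunk);
      Label* TrueLabel(LPlatformChunk* chunk);
      Label* FalseLabel(LPlatformChunk* chunk);
    };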
 
@@ -2279,17 +2267,14 @@
 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
   LOperand* left = instr->left();
   LOperand* right = instr->right();
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
   Condition cc = TokenToCondition(instr->op(), instr->is_double());
 
   if (left->IsConstantOperand() && right->IsConstantOperand()) {
     // We can statically evaluate the comparison.
     double left_val = ToDouble(LConstantOperand::cast(left));
     double right_val = ToDouble(LConstantOperand::cast(right));
-    int next_block =
-      EvalComparison(instr->op(), left_val, right_val) ? true_block
-                                                       : false_block;
+    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
     EmitGoto(next_block);
   } else {
     if (instr->is_double()) {
@@ -2297,7 +2282,7 @@
       // Don't base result on EFLAGS when a NaN is involved. Instead
       // jump to the false block.
       __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
-      __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+      __ j(parity_even, instr->FalseLabel(chunk_));
     } else {
       if (right->IsConstantOperand()) {
         int32_t const_value = ToInteger32(LConstantOperand::cast(right));
@@ -2319,15 +2304,13 @@
         __ cmp(ToRegister(left), ToOperand(right));
       }
     }
-    EmitBranch(true_block, false_block, cc);
+    EmitBranch(instr, cc);
   }
 }
 
 
 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
   Register left = ToRegister(instr->left());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
 
   if (instr->right()->IsConstantOperand()) {
     Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
@@ -2336,17 +2319,15 @@
     Operand right = ToOperand(instr->right());
     __ cmp(left, right);
   }
-  EmitBranch(true_block, false_block, equal);
+  EmitBranch(instr, equal);
 }
 
 
 void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
   Register left = ToRegister(instr->left());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
 
   __ cmp(left, instr->hydrogen()->right());
-  EmitBranch(true_block, false_block, equal);
+  EmitBranch(instr, equal);
 }
 
 
@@ -2377,21 +2358,20 @@
   Register reg = ToRegister(instr->value());
   Register temp = ToRegister(instr->temp());
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  Label* true_label = chunk_->GetAssemblyLabel(true_block);
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+  Condition true_cond = EmitIsObject(
+      reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
 
-  Condition true_cond = EmitIsObject(reg, temp, false_label, true_label);
-
-  EmitBranch(true_block, false_block, true_cond);
+  EmitBranch(instr, true_cond);
 }
 
 
 Condition LCodeGen::EmitIsString(Register input,
                                  Register temp1,
-                                 Label* is_not_string) {
-  __ JumpIfSmi(input, is_not_string);
+                                 Label* is_not_string,
+                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
+  if (check_needed == INLINE_SMI_CHECK) {
+    __ JumpIfSmi(input, is_not_string);
+  }
 
   Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
 
@@ -2403,24 +2383,22 @@
   Register reg = ToRegister(instr->value());
   Register temp = ToRegister(instr->temp());
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+  SmiCheck check_needed =
+      instr->hydrogen()->value()->IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
 
-  Condition true_cond = EmitIsString(reg, temp, false_label);
+  Condition true_cond = EmitIsString(
+      reg, temp, instr->FalseLabel(chunk_), check_needed);
 
-  EmitBranch(true_block, false_block, true_cond);
+  EmitBranch(instr, true_cond);
 }
 
 
 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   Operand input = ToOperand(instr->value());
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
   __ test(input, Immediate(kSmiTagMask));
-  EmitBranch(true_block, false_block, zero);
+  EmitBranch(instr, zero);
 }
 
 
@@ -2428,15 +2406,14 @@
   Register input = ToRegister(instr->value());
   Register temp = ToRegister(instr->temp());
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  STATIC_ASSERT(kSmiTag == 0);
-  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+  if (!instr->hydrogen()->value()->IsHeapObject()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
   __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
   __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
             1 << Map::kIsUndetectable);
-  EmitBranch(true_block, false_block, not_zero);
+  EmitBranch(instr, not_zero);
 }
 
 
@@ -2462,8 +2439,6 @@
 
 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
   Token::Value op = instr->op();
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
 
   Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -2471,7 +2446,7 @@
   Condition condition = ComputeCompareCondition(op);
   __ test(eax, Operand(eax));
 
-  EmitBranch(true_block, false_block, condition);
+  EmitBranch(instr, condition);
 }
 
 
@@ -2499,15 +2474,12 @@
   Register input = ToRegister(instr->value());
   Register temp = ToRegister(instr->temp());
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
-  __ JumpIfSmi(input, false_label);
+  if (!instr->hydrogen()->value()->IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
 
   __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
-  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
+  EmitBranch(instr, BranchCondition(instr->hydrogen()));
 }
 
 
@@ -2526,12 +2498,9 @@
     LHasCachedArrayIndexAndBranch* instr) {
   Register input = ToRegister(instr->value());
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
   __ test(FieldOperand(input, String::kHashFieldOffset),
           Immediate(String::kContainsCachedArrayIndexMask));
-  EmitBranch(true_block, false_block, equal);
+  EmitBranch(instr, equal);
 }
 
 
@@ -2607,25 +2576,17 @@
 
   Handle<String> class_name = instr->hydrogen()->class_name();
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+      class_name, input, temp, temp2);
 
-  Label* true_label = chunk_->GetAssemblyLabel(true_block);
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
-  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
-  EmitBranch(true_block, false_block, equal);
+  EmitBranch(instr, equal);
 }
 
 
 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
   Register reg = ToRegister(instr->value());
-  int true_block = instr->true_block_id();
-  int false_block = instr->false_block_id();
-
   __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
-  EmitBranch(true_block, false_block, equal);
+  EmitBranch(instr, equal);
 }
 
 
@@ -2952,9 +2913,9 @@
 
   __ mov(target, value);
   if (instr->hydrogen()->NeedsWriteBarrier()) {
-    HType type = instr->hydrogen()->value()->type();
     SmiCheck check_needed =
-        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+        instr->hydrogen()->value()->IsHeapObject()
+            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     Register temp = ToRegister(instr->temp());
     int offset = Context::SlotOffset(instr->slot_index());
     __ RecordWriteContextSlot(context,
@@ -4197,12 +4158,9 @@
   ASSERT(ToRegister(instr->constructor()).is(edi));
   ASSERT(ToRegister(instr->result()).is(eax));
 
-  if (FLAG_optimize_constructed_arrays) {
-    // No cell in ebx for construct type feedback in optimized code
-    Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
-                                   isolate());
-    __ mov(ebx, Immediate(undefined_value));
-  }
+  // No cell in ebx for construct type feedback in optimized code
+  Handle<Object> undefined_value(isolate()->factory()->undefined_value());
+  __ mov(ebx, Immediate(undefined_value));
   CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
   __ Set(eax, Immediate(instr->arity()));
   CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
@@ -4213,16 +4171,18 @@
   ASSERT(ToRegister(instr->context()).is(esi));
   ASSERT(ToRegister(instr->constructor()).is(edi));
   ASSERT(ToRegister(instr->result()).is(eax));
-  ASSERT(FLAG_optimize_constructed_arrays);
 
   __ Set(eax, Immediate(instr->arity()));
   __ mov(ebx, instr->hydrogen()->property_cell());
   ElementsKind kind = instr->hydrogen()->elements_kind();
-  bool disable_allocation_sites =
-      (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE);
+  AllocationSiteOverrideMode override_mode =
+      (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE)
+          ? DISABLE_ALLOCATION_SITES
+          : DONT_OVERRIDE;
+  ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
 
   if (instr->arity() == 0) {
-    ArrayNoArgumentConstructorStub stub(kind, disable_allocation_sites);
+    ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   } else if (instr->arity() == 1) {
     Label done;
@@ -4235,18 +4195,18 @@
       __ j(zero, &packed_case);
 
       ElementsKind holey_kind = GetHoleyElementsKind(kind);
-      ArraySingleArgumentConstructorStub stub(holey_kind,
-                                              disable_allocation_sites);
+      ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
+                                              override_mode);
       CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
       __ jmp(&done);
       __ bind(&packed_case);
     }
 
-    ArraySingleArgumentConstructorStub stub(kind, disable_allocation_sites);
+    ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
     __ bind(&done);
   } else {
-    ArrayNArgumentsConstructorStub stub(kind, disable_allocation_sites);
+    ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   }
 }
@@ -4327,9 +4287,9 @@
   }
 
   // Do the store.
-  HType type = instr->hydrogen()->value()->type();
   SmiCheck check_needed =
-      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+      instr->hydrogen()->value()->IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
 
   Register write_register = object;
   if (!access.IsInobject()) {
@@ -4566,9 +4526,9 @@
     ASSERT(instr->value()->IsRegister());
     Register value = ToRegister(instr->value());
     ASSERT(!instr->key()->IsConstantOperand());
-    HType type = instr->hydrogen()->value()->type();
     SmiCheck check_needed =
-        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+        instr->hydrogen()->value()->IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
     __ lea(key, operand);
     __ RecordWrite(elements,
@@ -4911,7 +4871,8 @@
   } else {
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope feature_scope(masm(), SSE2);
-      __ LoadUint32(xmm0, reg, xmm1);
+      __ LoadUint32(xmm0, reg,
+                    ToDoubleRegister(LNumberTagU::cast(instr)->temp()));
     } else {
       // There's no fild variant for unsigned values, so zero-extend to a 64-bit
       // int manually.
@@ -5709,9 +5670,11 @@
 
 
 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
-  LOperand* input = instr->value();
-  __ test(ToOperand(input), Immediate(kSmiTagMask));
-  DeoptimizeIf(zero, instr->environment());
+  if (!instr->hydrogen()->value()->IsHeapObject()) {
+    LOperand* input = instr->value();
+    __ test(ToOperand(input), Immediate(kSmiTagMask));
+    DeoptimizeIf(zero, instr->environment());
+  }
 }
 
 
@@ -5991,6 +5954,95 @@
 }
 
 
+void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
+  class DeferredAllocateObject: public LDeferredCode {
+   public:
+    DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LAllocateObject* instr_;
+  };
+
+  DeferredAllocateObject* deferred =
+      new(zone()) DeferredAllocateObject(this, instr);
+
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->temp());
+  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+  Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
+  int instance_size = initial_map->instance_size();
+  ASSERT(initial_map->pre_allocated_property_fields() +
+         initial_map->unused_property_fields() -
+         initial_map->inobject_properties() == 0);
+
+  __ Allocate(instance_size, result, no_reg, scratch, deferred->entry(),
+              TAG_OBJECT);
+
+  __ bind(deferred->exit());
+  if (FLAG_debug_code) {
+    Label is_in_new_space;
+    __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
+    __ Abort("Allocated object is not in new-space");
+    __ bind(&is_in_new_space);
+  }
+
+  // Load the initial map.
+  Register map = scratch;
+  __ LoadHeapObject(scratch, constructor);
+  __ mov(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
+
+  if (FLAG_debug_code) {
+    __ AssertNotSmi(map);
+    __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
+            instance_size >> kPointerSizeLog2);
+    __ Assert(equal, "Unexpected instance size");
+    __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
+            initial_map->pre_allocated_property_fields());
+    __ Assert(equal, "Unexpected pre-allocated property fields count");
+    __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
+            initial_map->unused_property_fields());
+    __ Assert(equal, "Unexpected unused property fields count");
+    __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
+            initial_map->inobject_properties());
+    __ Assert(equal, "Unexpected in-object property fields count");
+  }
+
+  // Initialize map and fields of the newly allocated object.
+  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
+  __ mov(FieldOperand(result, JSObject::kMapOffset), map);
+  __ mov(scratch, factory()->empty_fixed_array());
+  __ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
+  __ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
+  if (initial_map->inobject_properties() != 0) {
+    __ mov(scratch, factory()->undefined_value());
+    for (int i = 0; i < initial_map->inobject_properties(); i++) {
+      int property_offset = JSObject::kHeaderSize + i * kPointerSize;
+      __ mov(FieldOperand(result, property_offset), scratch);
+    }
+  }
+}
+
+
+void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
+  Register result = ToRegister(instr->result());
+  Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
+  int instance_size = initial_map->instance_size();
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ Set(result, Immediate(0));
+
+  PushSafepointRegistersScope scope(this);
+  __ push(Immediate(Smi::FromInt(instance_size)));
+  CallRuntimeFromDeferred(
+      Runtime::kAllocateInNewSpace, 1, instr, instr->context());
+  __ StoreToSafepointRegisterSlot(result, eax);
+}
+
+
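DoAllocateObject follows the file's recurring deferred-code idiom. The contract, reduced to a hedged sketch (member names illustrative):

    // The fast path jumps to entry() when inline allocation fails;
    // Generate() emits the out-of-line runtime call, and control
    // rejoins the fast path at exit().
    class DeferredCodeSketch {
     public:
      virtual ~DeferredCodeSketch() {}
      Label* entry() { return &entry_; }  // Slow-path jump target.
      Label* exit() { return &exit_; }    // Rejoin point.
      virtual void Generate() = 0;        // Emitted after the main body.
     private:
      Label entry_;
      Label exit_;
    };
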
 void LCodeGen::DoAllocate(LAllocate* instr) {
   class DeferredAllocate: public LDeferredCode {
    public:
@@ -6154,15 +6206,12 @@
 
 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
   Register input = ToRegister(instr->value());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  Label* true_label = chunk_->GetAssemblyLabel(true_block);
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
   Condition final_branch_condition =
-      EmitTypeofIs(true_label, false_label, input, instr->type_literal());
+      EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+          input, instr->type_literal());
   if (final_branch_condition != no_condition) {
-    EmitBranch(true_block, false_block, final_branch_condition);
+    EmitBranch(instr, final_branch_condition);
   }
 }
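The call above now hands the instruction itself to EmitBranch, which is declared as a template further down in lithium-codegen-ia32.h. A plausible implementation, assuming it keeps the old fall-through optimization while reading its destinations from the instruction (GetNextEmittedBlock and EmitGoto as already used in this file):

template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);
  int next_block = GetNextEmittedBlock();
  if (right_block == left_block) {
    // Both successors are the same block: no conditional jump needed.
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    // True block falls through; branch only on the false case.
    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    // False block falls through; branch only on the true case.
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    __ jmp(chunk_->GetAssemblyLabel(right_block));
  }
}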
 
@@ -6243,11 +6292,9 @@
 
 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
   Register temp = ToRegister(instr->temp());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
 
   EmitIsConstructCall(temp);
-  EmitBranch(true_block, false_block, equal);
+  EmitBranch(instr, equal);
 }
 
 
@@ -6393,15 +6440,15 @@
   // properly registered for deoptimization and records the assembler's PC
   // offset.
   LEnvironment* environment = instr->environment();
-  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
-                                   instr->SpilledDoubleRegisterArray());
 
   // If the environment were already registered, we would have no way of
   // backpatching it with the spill slot operands.
   ASSERT(!environment->HasBeenRegistered());
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-  ASSERT(osr_pc_offset_ == -1);
-  osr_pc_offset_ = masm()->pc_offset();
+
+  // Normally we record the first unknown OSR value as the entrypoint to the OSR
+  // code, but if there were none, record the entrypoint here.
+  if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
 }
 
 
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 21f6076..d05da8a 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -83,7 +83,6 @@
   Heap* heap() const { return isolate()->heap(); }
   Zone* zone() const { return zone_; }
 
-  // TODO(svenpanne) Use this consistently.
   int LookupDestination(int block_id) const {
     return chunk()->LookupDestination(block_id);
   }
@@ -157,6 +156,7 @@
   void DoDeferredRandom(LRandom* instr);
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredAllocateObject(LAllocateObject* instr);
   void DoDeferredAllocate(LAllocate* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
@@ -317,7 +317,8 @@
 
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
-  void EmitBranch(int left_block, int right_block, Condition cc);
+  template<class InstrType>
+  void EmitBranch(InstrType instr, Condition cc);
   void EmitNumberUntagD(
       Register input,
       Register temp,
@@ -356,7 +357,8 @@
   // true and false label should be made, to optimize fallthrough.
   Condition EmitIsString(Register input,
                          Register temp1,
-                         Label* is_not_string);
+                         Label* is_not_string,
+                         SmiCheck check_needed);
 
   // Emits optimized code for %_IsConstructCall().
   // Caller should branch on equal condition.
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 885b2e3..04b8934 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -43,31 +43,6 @@
 LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
 #undef DEFINE_COMPILE
 
-LOsrEntry::LOsrEntry() {
-  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
-    register_spills_[i] = NULL;
-  }
-  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
-    double_register_spills_[i] = NULL;
-  }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
-                                    LOperand* spill_operand) {
-  ASSERT(spill_operand->IsStackSlot());
-  ASSERT(register_spills_[allocation_index] == NULL);
-  register_spills_[allocation_index] = spill_operand;
-}
-
-
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
-                                          LOperand* spill_operand) {
-  ASSERT(spill_operand->IsDoubleStackSlot());
-  ASSERT(double_register_spills_[allocation_index] == NULL);
-  double_register_spills_[allocation_index] = spill_operand;
-}
-
 
 #ifdef DEBUG
 void LInstruction::VerifyCall() {
@@ -376,8 +351,7 @@
   constructor()->PrintTo(stream);
   stream->Add(" #%d / ", arity());
   ASSERT(hydrogen()->property_cell()->value()->IsSmi());
-  ElementsKind kind = static_cast<ElementsKind>(
-      Smi::cast(hydrogen()->property_cell()->value())->value());
+  ElementsKind kind = hydrogen()->elements_kind();
   stream->Add(" (%s) ", ElementsKindToString(kind));
 }
 
@@ -481,7 +455,7 @@
 LPlatformChunk* LChunkBuilder::Build() {
   ASSERT(is_unused());
   chunk_ = new(zone()) LPlatformChunk(info(), graph());
-  HPhase phase("L_Building chunk", chunk_);
+  LPhase phase("L_Building chunk", chunk_);
   status_ = BUILDING;
 
   // Reserve the first spill slot for the state of dynamic alignment.
@@ -1060,20 +1034,28 @@
     return new(zone()) LGoto(successor->block_id());
   }
 
-  // Untagged integers or doubles, smis and booleans don't require a
-  // deoptimization environment nor a temp register.
+  ToBooleanStub::Types expected = instr->expected_input_types();
+
+  // Tagged values that are not known smis or booleans require a
+  // deoptimization environment. If the instruction is generic no
+  // environment is needed since all cases are handled.
   Representation rep = value->representation();
   HType type = value->type();
   if (!rep.IsTagged() || type.IsSmi() || type.IsBoolean()) {
     return new(zone()) LBranch(UseRegister(value), NULL);
   }
 
-  ToBooleanStub::Types expected = instr->expected_input_types();
+  bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
+  LOperand* temp = needs_temp ? TempRegister() : NULL;
+
+  // The Generic stub does not have a deopt, so we need no environment.
+  if (expected.IsGeneric()) {
+    return new(zone()) LBranch(UseRegister(value), temp);
+  }
+
   // We need a temporary register when we have to access the map *or* we have
   // no type info yet, in which case we handle all cases (including the ones
   // involving maps).
-  bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
-  LOperand* temp = needs_temp ? TempRegister() : NULL;
   return AssignEnvironment(new(zone()) LBranch(UseRegister(value), temp));
 }
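The reshuffled control flow in DoBranch now distinguishes three cases; as a summary (descriptive only, not literal source):

// 1. Untagged, known-smi, or known-boolean input: no temp, no environment.
// 2. Generic ToBoolean stub (handles all cases, cannot deopt): temp, no
//    environment.
// 3. Other tagged inputs: temp when a map check is needed, plus a
//    deoptimization environment for the unhandled type combinations.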
 
@@ -1367,7 +1349,6 @@
 
 
 LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
-  ASSERT(FLAG_optimize_constructed_arrays);
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* constructor = UseFixed(instr->constructor(), edi);
   argument_count_ -= instr->argument_count();
@@ -1543,7 +1524,7 @@
               instr->CheckFlag(HValue::kBailoutOnMinusZero))
           ? AssignEnvironment(result)
           : result;
-    } else if (instr->has_fixed_right_arg()) {
+    } else if (instr->fixed_right_arg().has_value) {
       LModI* mod = new(zone()) LModI(UseRegister(left),
                                      UseRegisterAtStart(right),
                                      NULL);
@@ -2024,7 +2005,9 @@
       if (val->HasRange() && val->range()->IsInSmiRange()) {
         return DefineSameAsFirst(new(zone()) LSmiTag(value));
       } else if (val->CheckFlag(HInstruction::kUint32)) {
-        LNumberTagU* result = new(zone()) LNumberTagU(value);
+        LOperand* temp = CpuFeatures::IsSupported(SSE2) ? FixedTemp(xmm1)
+                                                        : NULL;
+        LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
         return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
       } else {
         LNumberTagI* result = new(zone()) LNumberTagI(value);
@@ -2056,7 +2039,7 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
   LOperand* value = UseAtStart(instr->value());
   return AssignEnvironment(new(zone()) LCheckNonSmi(value));
 }
@@ -2548,6 +2531,15 @@
 }
 
 
+LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseAny(instr->context());
+  LOperand* temp = TempRegister();
+  LAllocateObject* result = new(zone()) LAllocateObject(context, temp);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
 LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
   info()->MarkAsDeferredCalling();
   LOperand* context = UseAny(instr->context());
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index ed33668..59c6c81 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -44,6 +44,7 @@
   V(AccessArgumentsAt)                          \
   V(AddI)                                       \
   V(Allocate)                                   \
+  V(AllocateObject)                             \
   V(ApplyArguments)                             \
   V(ArgumentsElements)                          \
   V(ArgumentsLength)                            \
@@ -482,17 +483,44 @@
 template<int I, int T>
 class LControlInstruction: public LTemplateInstruction<0, I, T> {
  public:
+  LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
   virtual bool IsControl() const { return true; }
 
   int SuccessorCount() { return hydrogen()->SuccessorCount(); }
   HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
-  int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
-  int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
+
+  int TrueDestination(LChunk* chunk) {
+    return chunk->LookupDestination(true_block_id());
+  }
+  int FalseDestination(LChunk* chunk) {
+    return chunk->LookupDestination(false_block_id());
+  }
+
+  Label* TrueLabel(LChunk* chunk) {
+    if (true_label_ == NULL) {
+      true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+    }
+    return true_label_;
+  }
+  Label* FalseLabel(LChunk* chunk) {
+    if (false_label_ == NULL) {
+      false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+    }
+    return false_label_;
+  }
+
+ protected:
+  int true_block_id() { return SuccessorAt(0)->block_id(); }
+  int false_block_id() { return SuccessorAt(1)->block_id(); }
 
  private:
   HControlInstruction* hydrogen() {
     return HControlInstruction::cast(this->hydrogen_value());
   }
+
+  Label* false_label_;
+  Label* true_label_;
 };
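TrueLabel and FalseLabel memoize the chunk lookup, so emitters can jump straight at a successor without resolving block ids by hand. A sketch of a typical consumer:

// Inside some DoXxxAndBranch handler: bail out to the false successor
// directly (a sketch; `input` stands for the instruction's input register).
__ JumpIfSmi(input, instr->FalseLabel(chunk_));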
 
 
@@ -867,6 +895,7 @@
   LOperand* temp() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -899,6 +928,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
                                "is-undetectable-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -1206,7 +1236,7 @@
 };
 
 
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
+class LCmpMapAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LCmpMapAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -1217,15 +1247,7 @@
   DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(CompareMap)
 
-  virtual bool IsControl() const { return true; }
-
   Handle<Map> map() const { return hydrogen()->map(); }
-  int true_block_id() const {
-    return hydrogen()->FirstSuccessor()->block_id();
-  }
-  int false_block_id() const {
-    return hydrogen()->SecondSuccessor()->block_id();
-  }
 };
 
 
@@ -2048,13 +2070,15 @@
 };
 
 
-class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
+class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
  public:
-  explicit LNumberTagU(LOperand* value) {
+  LNumberTagU(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
+    temps_[0] = temp;
   }
 
   LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
 };
@@ -2548,6 +2572,22 @@
   LOperand* value() { return inputs_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+  DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LAllocateObject: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LAllocateObject(LOperand* context, LOperand* temp) {
+    inputs_[0] = context;
+    temps_[0] = temp;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
+  DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
 };
 
 
@@ -2656,26 +2696,10 @@
 
 class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
  public:
-  LOsrEntry();
+  LOsrEntry() {}
 
   virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
   DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
-  LOperand** SpilledRegisterArray() { return register_spills_; }
-  LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
-  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
-  void MarkSpilledDoubleRegister(int allocation_index,
-                                 LOperand* spill_operand);
-
- private:
-  // Arrays of spill slot operands for registers with an assigned spill
-  // slot, i.e., that must also be restored to the spill slot on OSR entry.
-  // NULL if the register has no assigned spill slot.  Indexed by allocation
-  // index.
-  LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
-  LOperand* double_register_spills_[
-      DoubleRegister::kMaxNumAllocatableRegisters];
 };
 
 
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 17d4aac..31e9e53 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -2807,6 +2807,17 @@
 }
 
 
+void MacroAssembler::JumpIfNotUniqueName(Operand operand,
+                                         Label* not_unique_name,
+                                         Label::Distance distance) {
+  STATIC_ASSERT(((SYMBOL_TYPE - 1) & kIsInternalizedMask) == kInternalizedTag);
+  cmp(operand, Immediate(kInternalizedTag));
+  j(less, not_unique_name, distance);
+  cmp(operand, Immediate(SYMBOL_TYPE));
+  j(greater, not_unique_name, distance);
+}
+
+
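The pair of compares implements a single range check: unique names, i.e. internalized strings and symbols, occupy the contiguous instance-type range from kInternalizedTag up to SYMBOL_TYPE, which is exactly what the STATIC_ASSERT pins down. An equivalent C++ predicate (a sketch):

// Anything below kInternalizedTag or above SYMBOL_TYPE is rejected.
static inline bool IsUniqueName(int instance_type) {
  return instance_type >= kInternalizedTag && instance_type <= SYMBOL_TYPE;
}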
 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
   int frame_alignment = OS::ActivationFrameAlignment();
   if (frame_alignment != 0) {
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 7d780f0..5cb8286 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -884,6 +884,15 @@
                                            Register scratch2,
                                            Label* on_not_flat_ascii_strings);
 
+  // Checks if the given register or operand is a unique name
+  void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
+                           Label::Distance distance = Label::kFar) {
+    JumpIfNotUniqueName(Operand(reg), not_unique_name, distance);
+  }
+
+  void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
+                           Label::Distance distance = Label::kFar);
+
   static int SafepointRegisterStackIndex(Register reg) {
     return SafepointRegisterStackIndex(reg.code());
   }
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index c0ee28a..1a51016 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -1696,12 +1696,59 @@
 }
 
 
+Handle<Code> CallStubCompiler::CompileArrayCodeCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<Cell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name,
+    Code::StubType type) {
+  Label miss;
+
+  // Check that the function being called is still the Array function.
+  const int argc = arguments().immediate();
+  GenerateNameCheck(name, &miss);
+
+  if (cell.is_null()) {
+    // Get the receiver from the stack.
+    __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+    // Check that the receiver isn't a smi.
+    __ JumpIfSmi(edx, &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+                    name, &miss);
+  } else {
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
+    GenerateLoadFunctionFromCell(cell, function, &miss);
+  }
+
+  Handle<Smi> kind(Smi::FromInt(GetInitialFastElementsKind()), isolate());
+  Handle<Cell> kind_feedback_cell =
+      isolate()->factory()->NewCell(kind);
+  __ mov(eax, Immediate(argc));
+  __ mov(ebx, kind_feedback_cell);
+  __ mov(edi, function);
+
+  ArrayConstructorStub stub(isolate());
+  __ TailCallStub(&stub);
+
+  __ bind(&miss);
+  GenerateMissBranch();
+
+  // Return the generated code.
+  return GetCode(type, name);
+}
+
+
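The freshly allocated Cell seeded with the initial fast elements kind serves as per-call-site allocation feedback for ArrayConstructorStub. The register protocol the stub expects, as set up by the three moves above:

// Calling convention of ArrayConstructorStub on ia32 (as used here):
//   eax: argument count
//   ebx: feedback cell whose value is the (Smi) elements kind
//   edi: the Array function being called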
 Handle<Code> CallStubCompiler::CompileArrayPushCall(
     Handle<Object> object,
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1950,7 +1997,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return GetCode(function);
+  return GetCode(type, name);
 }
 
 
@@ -1959,7 +2006,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2032,7 +2080,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return GetCode(function);
+  return GetCode(type, name);
 }
 
 
@@ -2041,7 +2089,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- ecx                 : function name
   //  -- esp[0]              : return address
@@ -2116,7 +2165,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return GetCode(function);
+  return GetCode(type, name);
 }
 
 
@@ -2125,7 +2174,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- ecx                 : function name
   //  -- esp[0]              : return address
@@ -2202,7 +2252,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return GetCode(function);
+  return GetCode(type, name);
 }
 
 
@@ -2211,7 +2261,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- ecx                 : function name
   //  -- esp[0]              : return address
@@ -2278,7 +2329,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+  return GetCode(type, name);
 }
 
 
@@ -2287,7 +2338,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2409,7 +2461,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+  return GetCode(type, name);
 }
 
 
@@ -2418,7 +2470,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2515,7 +2568,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+  return GetCode(type, name);
 }
 
 
@@ -2706,7 +2759,8 @@
   if (HasCustomCallGenerator(function)) {
     Handle<Code> code = CompileCustomCall(object, holder,
                                           Handle<Cell>::null(),
-                                          function, Handle<String>::cast(name));
+                                          function, Handle<String>::cast(name),
+                                          Code::CONSTANT_FUNCTION);
     // A null handle means bail out to the regular compiler code below.
     if (!code.is_null()) return code;
   }
@@ -2797,7 +2851,8 @@
 
   if (HasCustomCallGenerator(function)) {
     Handle<Code> code = CompileCustomCall(
-        object, holder, cell, function, Handle<String>::cast(name));
+        object, holder, cell, function, Handle<String>::cast(name),
+        Code::NORMAL);
     // A null handle means bail out to the regular compiler code below.
     if (!code.is_null()) return code;
   }
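Every custom call compiler in this file now receives its stub type from the caller instead of rederiving it from whether `cell` is null; the two dispatch sites above fix the convention. In summary (a sketch, not literal source):

// Constant-function case: no cell, the stub is tied to one function.
CompileCustomCall(object, holder, Handle<Cell>::null(),
                  function, name, Code::CONSTANT_FUNCTION);
// Global-property case: the function is loaded from a cell at runtime.
CompileCustomCall(object, holder, cell, function, name, Code::NORMAL);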
diff --git a/src/ic.cc b/src/ic.cc
index d6e35a7..ff3a94d 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -144,7 +144,7 @@
   ASSERT(fp == frame->fp() && pc_address == frame->pc_address());
 #endif
   fp_ = fp;
-  pc_address_ = pc_address;
+  pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
 }
 
 
@@ -2437,7 +2437,7 @@
     case UNINITIALIZED:
       return handle(Type::None(), isolate);
     case SMI:
-      return handle(Type::Integer31(), isolate);
+      return handle(Type::Smi(), isolate);
     case NUMBER:
       return handle(Type::Number(), isolate);
     case GENERIC:
@@ -2524,9 +2524,9 @@
     case UNINITIALIZED:
       return handle(Type::None(), isolate);
     case SMI:
-      return handle(Type::Integer31(), isolate);
+      return handle(Type::Smi(), isolate);
     case INT32:
-      return handle(Type::Integer32(), isolate);
+      return handle(Type::Signed32(), isolate);
     case NUMBER:
       return handle(Type::Number(), isolate);
     case ODDBALL:
@@ -2653,11 +2653,10 @@
 #ifdef DEBUG
 static void TraceBinaryOp(BinaryOpIC::TypeInfo left,
                           BinaryOpIC::TypeInfo right,
-                          bool has_fixed_right_arg,
-                          int32_t fixed_right_arg_value,
+                          Maybe<int32_t> fixed_right_arg,
                           BinaryOpIC::TypeInfo result) {
   PrintF("%s*%s", BinaryOpIC::GetName(left), BinaryOpIC::GetName(right));
-  if (has_fixed_right_arg) PrintF("{%d}", fixed_right_arg_value);
+  if (fixed_right_arg.has_value) PrintF("{%d}", fixed_right_arg.value);
   PrintF("->%s", BinaryOpIC::GetName(result));
 }
 #endif
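Maybe<T> bundles the old has_value/value parameter pair into one object. Judging from the usage above (two-argument constructor, public has_value and value fields), its shape is roughly:

template<class T>
struct Maybe {
  Maybe() : has_value(false) {}
  explicit Maybe(T t) : has_value(true), value(t) {}
  Maybe(bool has, T t) : has_value(has), value(t) {}  // used above
  bool has_value;
  T value;
};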
@@ -2689,10 +2688,8 @@
   BinaryOpIC::TypeInfo new_overall = Max(new_left, new_right);
   BinaryOpIC::TypeInfo previous_overall = Max(previous_left, previous_right);
 
-  bool previous_has_fixed_right_arg =
-      BinaryOpStub::decode_has_fixed_right_arg_from_minor_key(key);
-  int previous_fixed_right_arg_value =
-      BinaryOpStub::decode_fixed_right_arg_value_from_minor_key(key);
+  Maybe<int> previous_fixed_right_arg =
+      BinaryOpStub::decode_fixed_right_arg_from_minor_key(key);
 
   int32_t value;
   bool new_has_fixed_right_arg =
@@ -2700,11 +2697,12 @@
       right->ToInt32(&value) &&
       BinaryOpStub::can_encode_arg_value(value) &&
       (previous_overall == BinaryOpIC::UNINITIALIZED ||
-       (previous_has_fixed_right_arg &&
-        previous_fixed_right_arg_value == value));
-  int32_t new_fixed_right_arg_value = new_has_fixed_right_arg ? value : 1;
+       (previous_fixed_right_arg.has_value &&
+        previous_fixed_right_arg.value == value));
+  Maybe<int32_t> new_fixed_right_arg(
+      new_has_fixed_right_arg, new_has_fixed_right_arg ? value : 1);
 
-  if (previous_has_fixed_right_arg == new_has_fixed_right_arg) {
+  if (previous_fixed_right_arg.has_value == new_fixed_right_arg.has_value) {
     if (new_overall == BinaryOpIC::SMI && previous_overall == BinaryOpIC::SMI) {
       if (op == Token::DIV ||
           op == Token::MUL ||
@@ -2728,8 +2726,7 @@
     }
   }
 
-  BinaryOpStub stub(key, new_left, new_right, result_type,
-                    new_has_fixed_right_arg, new_fixed_right_arg_value);
+  BinaryOpStub stub(key, new_left, new_right, result_type, new_fixed_right_arg);
   Handle<Code> code = stub.GetCode(isolate);
   if (!code.is_null()) {
 #ifdef DEBUG
@@ -2737,11 +2734,10 @@
       PrintF("[BinaryOpIC in ");
       JavaScriptFrame::PrintTop(isolate, stdout, false, true);
       PrintF(" ");
-      TraceBinaryOp(previous_left, previous_right, previous_has_fixed_right_arg,
-                    previous_fixed_right_arg_value, previous_result);
+      TraceBinaryOp(previous_left, previous_right, previous_fixed_right_arg,
+                    previous_result);
       PrintF(" => ");
-      TraceBinaryOp(new_left, new_right, new_has_fixed_right_arg,
-                    new_fixed_right_arg_value, result_type);
+      TraceBinaryOp(new_left, new_right, new_fixed_right_arg, result_type);
       PrintF(" #%s @ %p]\n", Token::Name(op), static_cast<void*>(*code));
     }
 #endif
@@ -2849,7 +2845,7 @@
     case CompareIC::UNINITIALIZED:
       return handle(Type::None(), isolate);
     case CompareIC::SMI:
-      return handle(Type::Integer31(), isolate);
+      return handle(Type::Smi(), isolate);
     case CompareIC::NUMBER:
       return handle(Type::Number(), isolate);
     case CompareIC::STRING:
diff --git a/src/isolate.cc b/src/isolate.cc
index 6c797b2..2383399 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1738,7 +1738,6 @@
       descriptor_lookup_cache_(NULL),
       handle_scope_implementer_(NULL),
       unicode_cache_(NULL),
-      runtime_zone_(this),
       in_use_list_(0),
       free_list_(0),
       preallocated_storage_preallocated_(false),
@@ -1754,8 +1753,10 @@
       date_cache_(NULL),
       code_stub_interface_descriptors_(NULL),
       context_exit_happened_(false),
+      initialized_from_snapshot_(false),
       cpu_profiler_(NULL),
       heap_profiler_(NULL),
+      function_entry_hook_(NULL),
       deferred_handles_head_(NULL),
       optimizing_compiler_thread_(this),
       marking_thread_(NULL),
@@ -1935,9 +1936,6 @@
 Isolate::~Isolate() {
   TRACE_ISOLATE(destructor);
 
-  // Has to be called while counters_ are still alive.
-  runtime_zone_.DeleteKeptSegment();
-
   delete[] assembler_spare_buffer_;
   assembler_spare_buffer_ = NULL;
 
@@ -2081,6 +2079,14 @@
   ASSERT(Isolate::Current() == this);
   TRACE_ISOLATE(init);
 
+  if (function_entry_hook() != NULL) {
+    // When function entry hooking is in effect, we have to create the code
+    // stubs from scratch to get entry hooks, rather than loading the previously
+    // generated stubs from disk.
+    // If this assert fires, the initialization path has regressed.
+    ASSERT(des == NULL);
+  }
+
   // The initialization process does not handle memory exhaustion.
   DisallowAllocationFailure disallow_allocation_failure;
 
@@ -2099,7 +2105,7 @@
   isolate_addresses_[Isolate::k##CamelName##Address] =          \
       reinterpret_cast<Address>(hacker_name##_address());
   FOR_EACH_ISOLATE_ADDRESS_NAME(ASSIGN_ELEMENT)
-#undef C
+#undef ASSIGN_ELEMENT
 
   string_tracker_ = new StringTracker();
   string_tracker_->isolate_ = this;
@@ -2114,7 +2120,7 @@
   global_handles_ = new GlobalHandles(this);
   bootstrapper_ = new Bootstrapper(this);
   handle_scope_implementer_ = new HandleScopeImplementer(this);
-  stub_cache_ = new StubCache(this, runtime_zone());
+  stub_cache_ = new StubCache(this);
   regexp_stack_ = new RegExpStack();
   regexp_stack_->isolate_ = this;
   date_cache_ = new DateCache();
@@ -2216,8 +2222,6 @@
     LOG(this, LogCompiledFunctions());
   }
 
-  CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, state_)),
-           Internals::kIsolateStateOffset);
   CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, embedder_data_)),
            Internals::kIsolateEmbedderDataOffset);
   CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.roots_)),
@@ -2259,47 +2263,24 @@
 
   if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
 
-  if (FLAG_parallel_marking && FLAG_marking_threads == 0) {
-    FLAG_marking_threads = SystemThreadManager::
-        NumberOfParallelSystemThreads(
-            SystemThreadManager::PARALLEL_MARKING);
-  }
   if (FLAG_marking_threads > 0) {
     marking_thread_ = new MarkingThread*[FLAG_marking_threads];
     for (int i = 0; i < FLAG_marking_threads; i++) {
       marking_thread_[i] = new MarkingThread(this);
       marking_thread_[i]->Start();
     }
-  } else {
-    FLAG_parallel_marking = false;
   }
 
-  if (FLAG_sweeper_threads == 0) {
-    if (FLAG_concurrent_sweeping) {
-      FLAG_sweeper_threads = SystemThreadManager::
-          NumberOfParallelSystemThreads(
-              SystemThreadManager::CONCURRENT_SWEEPING);
-    } else if (FLAG_parallel_sweeping) {
-      FLAG_sweeper_threads = SystemThreadManager::
-          NumberOfParallelSystemThreads(
-              SystemThreadManager::PARALLEL_SWEEPING);
-    }
-  }
   if (FLAG_sweeper_threads > 0) {
     sweeper_thread_ = new SweeperThread*[FLAG_sweeper_threads];
     for (int i = 0; i < FLAG_sweeper_threads; i++) {
       sweeper_thread_[i] = new SweeperThread(this);
       sweeper_thread_[i]->Start();
     }
-  } else {
-    FLAG_concurrent_sweeping = false;
-    FLAG_parallel_sweeping = false;
   }
-  if (FLAG_parallel_recompilation &&
-      SystemThreadManager::NumberOfParallelSystemThreads(
-          SystemThreadManager::PARALLEL_RECOMPILATION) == 0) {
-    FLAG_parallel_recompilation = false;
-  }
+
+  initialized_from_snapshot_ = (des != NULL);
+
   return true;
 }
 
diff --git a/src/isolate.h b/src/isolate.h
index 76a5a41..981f81c 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -368,8 +368,6 @@
   /* AstNode state. */                                                         \
   V(int, ast_node_id, 0)                                                       \
   V(unsigned, ast_node_count, 0)                                               \
-  /* SafeStackFrameIterator activations count. */                              \
-  V(int, safe_stack_iterator_counter, 0)                                       \
   V(bool, observer_delivery_pending, false)                                    \
   V(HStatistics*, hstatistics, NULL)                                           \
   V(HTracer*, htracer, NULL)                                                   \
@@ -549,7 +547,7 @@
   }
   Context** context_address() { return &thread_local_top_.context_; }
 
-  SaveContext* save_context() {return thread_local_top_.save_context_; }
+  SaveContext* save_context() { return thread_local_top_.save_context_; }
   void set_save_context(SaveContext* save) {
     thread_local_top_.save_context_ = save;
   }
@@ -894,7 +892,6 @@
     ASSERT(handle_scope_implementer_);
     return handle_scope_implementer_;
   }
-  Zone* runtime_zone() { return &runtime_zone_; }
 
   UnicodeCache* unicode_cache() {
     return unicode_cache_;
@@ -1053,6 +1050,8 @@
     context_exit_happened_ = context_exit_happened;
   }
 
+  bool initialized_from_snapshot() { return initialized_from_snapshot_; }
+
   double time_millis_since_init() {
     return OS::TimeCurrentMillis() - time_millis_at_init_;
   }
@@ -1107,14 +1106,19 @@
     callback_table_ = callback_table;
   }
 
+  int id() const { return static_cast<int>(id_); }
+
   HStatistics* GetHStatistics();
   HTracer* GetHTracer();
 
+  FunctionEntryHook function_entry_hook() { return function_entry_hook_; }
+  void set_function_entry_hook(FunctionEntryHook function_entry_hook) {
+    function_entry_hook_ = function_entry_hook;
+  }
+
  private:
   Isolate();
 
-  int id() const { return static_cast<int>(id_); }
-
   friend struct GlobalState;
   friend struct InitializeGlobalState;
 
@@ -1262,7 +1266,6 @@
   v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
   HandleScopeImplementer* handle_scope_implementer_;
   UnicodeCache* unicode_cache_;
-  Zone runtime_zone_;
   PreallocatedStorage in_use_list_;
   PreallocatedStorage free_list_;
   bool preallocated_storage_preallocated_;
@@ -1292,6 +1295,9 @@
   // that a context was recently exited.
   bool context_exit_happened_;
 
+  // True if this isolate was initialized from a snapshot.
+  bool initialized_from_snapshot_;
+
   // Time stamp at initialization.
   double time_millis_at_init_;
 
@@ -1315,6 +1321,7 @@
 #endif
   CpuProfiler* cpu_profiler_;
   HeapProfiler* heap_profiler_;
+  FunctionEntryHook function_entry_hook_;
 
 #define GLOBAL_BACKING_STORE(type, name, initialvalue)                         \
   type name##_;
diff --git a/src/json-parser.h b/src/json-parser.h
index 152bd63..72c6910 100644
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -43,15 +43,33 @@
 template <bool seq_ascii>
 class JsonParser BASE_EMBEDDED {
  public:
-  static Handle<Object> Parse(Handle<String> source, Zone* zone) {
-    return JsonParser().ParseJson(source, zone);
+  static Handle<Object> Parse(Handle<String> source) {
+    return JsonParser(source).ParseJson();
   }
 
   static const int kEndOfString = -1;
 
  private:
+  explicit JsonParser(Handle<String> source)
+      : source_(source),
+        source_length_(source->length()),
+        isolate_(source->map()->GetHeap()->isolate()),
+        factory_(isolate_->factory()),
+        zone_(isolate_),
+        object_constructor_(isolate_->native_context()->object_function(),
+                            isolate_),
+        position_(-1) {
+    FlattenString(source_);
+    pretenure_ = (source_length_ >= kPretenureTreshold) ? TENURED : NOT_TENURED;
+
+    // Optimized fast case where we only have ASCII characters.
+    if (seq_ascii) {
+      seq_source_ = Handle<SeqOneByteString>::cast(source_);
+    }
+  }
+
   // Parse a string containing a single JSON value.
-  Handle<Object> ParseJson(Handle<String> source, Zone* zone);
+  Handle<Object> ParseJson();
 
   inline void Advance() {
     position_++;
@@ -179,13 +197,14 @@
   inline Isolate* isolate() { return isolate_; }
   inline Factory* factory() { return factory_; }
   inline Handle<JSFunction> object_constructor() { return object_constructor_; }
-  inline Zone* zone() const { return zone_; }
 
   static const int kInitialSpecialStringLength = 1024;
   static const int kPretenureTreshold = 100 * 1024;
 
 
  private:
+  Zone* zone() { return &zone_; }
+
   Handle<String> source_;
   int source_length_;
   Handle<SeqOneByteString> seq_source_;
@@ -193,32 +212,14 @@
   PretenureFlag pretenure_;
   Isolate* isolate_;
   Factory* factory_;
+  Zone zone_;
   Handle<JSFunction> object_constructor_;
   uc32 c0_;
   int position_;
-  Zone* zone_;
 };
 
 template <bool seq_ascii>
-Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source,
-                                                Zone* zone) {
-  isolate_ = source->map()->GetHeap()->isolate();
-  factory_ = isolate_->factory();
-  object_constructor_ = Handle<JSFunction>(
-      isolate()->native_context()->object_function(), isolate());
-  zone_ = zone;
-  FlattenString(source);
-  source_ = source;
-  source_length_ = source_->length();
-  pretenure_ = (source_length_ >= kPretenureTreshold) ? TENURED : NOT_TENURED;
-
-  // Optimized fast case where we only have ASCII characters.
-  if (seq_ascii) {
-    seq_source_ = Handle<SeqOneByteString>::cast(source_);
-  }
-
-  // Set initial position right before the string.
-  position_ = -1;
+Handle<Object> JsonParser<seq_ascii>::ParseJson() {
   // Advance to the first character (possibly EOS)
   AdvanceSkipWhitespace();
   Handle<Object> result = ParseJsonValue();
@@ -264,7 +265,7 @@
         break;
     }
 
-    MessageLocation location(factory->NewScript(source),
+    MessageLocation location(factory->NewScript(source_),
                              position_,
                              position_ + 1);
     Handle<Object> result = factory->NewSyntaxError(message, array);
@@ -323,7 +324,6 @@
   Handle<JSObject> json_object =
       factory()->NewJSObject(object_constructor(), pretenure_);
   Handle<Map> map(json_object->map());
-  ZoneScope zone_scope(zone(), DELETE_ON_EXIT);
   ZoneList<Handle<Object> > properties(8, zone());
   ASSERT_EQ(c0_, '{');
 
@@ -469,7 +469,6 @@
 template <bool seq_ascii>
 Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() {
   HandleScope scope(isolate());
-  ZoneScope zone_scope(zone(), DELETE_ON_EXIT);
   ZoneList<Handle<Object> > elements(4, zone());
   ASSERT_EQ(c0_, '[');
 
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 7838c04..5da7398 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -168,10 +168,9 @@
 
 Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
                                    Handle<String> pattern,
-                                   Handle<String> flag_str,
-                                   Zone* zone) {
-  ZoneScope zone_scope(zone, DELETE_ON_EXIT);
+                                   Handle<String> flag_str) {
   Isolate* isolate = re->GetIsolate();
+  Zone zone(isolate);
   JSRegExp::Flags flags = RegExpFlagsFromString(flag_str);
   CompilationCache* compilation_cache = isolate->compilation_cache();
   Handle<FixedArray> cached = compilation_cache->LookupRegExp(pattern, flags);
@@ -188,7 +187,7 @@
   RegExpCompileData parse_result;
   FlatStringReader reader(isolate, pattern);
   if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
-                                 &parse_result, zone)) {
+                                 &parse_result, &zone)) {
     // Throw an exception if we fail to parse the pattern.
     ThrowRegExpException(re,
                          pattern,
@@ -410,7 +409,7 @@
                                  bool is_ascii) {
   // Compile the RegExp.
   Isolate* isolate = re->GetIsolate();
-  ZoneScope zone_scope(isolate->runtime_zone(), DELETE_ON_EXIT);
+  Zone zone(isolate);
   PostponeInterruptsScope postpone(isolate);
   // If we had a compilation error the last time this is saved at the
   // saved code index.
@@ -441,10 +440,9 @@
   if (!pattern->IsFlat()) FlattenString(pattern);
   RegExpCompileData compile_data;
   FlatStringReader reader(isolate, pattern);
-  Zone* zone = isolate->runtime_zone();
   if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
                                  &compile_data,
-                                 zone)) {
+                                 &zone)) {
     // Throw an exception if we fail to parse the pattern.
     // THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
     ThrowRegExpException(re,
@@ -461,7 +459,7 @@
                             pattern,
                             sample_subject,
                             is_ascii,
-                            zone);
+                            &zone);
   if (result.error_message != NULL) {
     // Unable to compile regexp.
     Handle<String> error_message =
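Both hunks above repeat the pattern used throughout this change: instead of borrowing the isolate-wide runtime zone under a ZoneScope, compilation temporaries now live in a function-local Zone whose memory is released on scope exit. Schematically:

void CompileSomething(Isolate* isolate) {   // hypothetical example
  Zone zone(isolate);  // private arena for this compilation
  // ... allocate parse/compile temporaries in &zone ...
}                      // all zone memory is released here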
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 181a1b2..528a9a2 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -71,8 +71,7 @@
   // Returns false if compilation fails.
   static Handle<Object> Compile(Handle<JSRegExp> re,
                                 Handle<String> pattern,
-                                Handle<String> flags,
-                                Zone* zone);
+                                Handle<String> flags);
 
   // See ECMA-262 section 15.10.6.2.
   // This function calls the garbage collector if necessary.
diff --git a/src/lithium-allocator-inl.h b/src/lithium-allocator-inl.h
index a6d053a..8cca19b 100644
--- a/src/lithium-allocator-inl.h
+++ b/src/lithium-allocator-inl.h
@@ -148,14 +148,13 @@
 void LAllocator::SetLiveRangeAssignedRegister(
     LiveRange* range,
     int reg,
-    RegisterKind register_kind,
-    Zone* zone) {
+    RegisterKind register_kind) {
   if (register_kind == DOUBLE_REGISTERS) {
     assigned_double_registers_->Add(reg);
   } else {
     assigned_registers_->Add(reg);
   }
-  range->set_assigned_register(reg, register_kind, zone);
+  range->set_assigned_register(reg, register_kind, chunk()->zone());
 }
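This is one piece of a broader split carried through lithium-allocator.cc below: the allocator keeps a private scratch zone for data that dies when register allocation finishes, while anything the chunk retains for code generation is allocated in chunk()->zone(). Roughly:

// Allocator-lifetime data (live ranges, use intervals, liveness bit
// vectors): freed when the LAllocator's own zone is torn down.
range->AddUseInterval(start, end, zone());
// Chunk-lifetime data (operands, parallel moves, pointer-map entries):
// must survive until code generation, so it goes in the chunk's zone.
move->AddMove(from, to, chunk()->zone());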
 
 
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index 1fd921f..2e2f802 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -541,16 +541,16 @@
 
 
 LAllocator::LAllocator(int num_values, HGraph* graph)
-    : zone_(graph->zone()),
+    : zone_(graph->isolate()),
       chunk_(NULL),
-      live_in_sets_(graph->blocks()->length(), zone_),
-      live_ranges_(num_values * 2, zone_),
+      live_in_sets_(graph->blocks()->length(), zone()),
+      live_ranges_(num_values * 2, zone()),
       fixed_live_ranges_(NULL),
       fixed_double_live_ranges_(NULL),
-      unhandled_live_ranges_(num_values * 2, zone_),
-      active_live_ranges_(8, zone_),
-      inactive_live_ranges_(8, zone_),
-      reusable_slots_(8, zone_),
+      unhandled_live_ranges_(num_values * 2, zone()),
+      active_live_ranges_(8, zone()),
+      inactive_live_ranges_(8, zone()),
+      reusable_slots_(8, zone()),
       next_virtual_register_(num_values),
       first_artificial_register_(num_values),
       mode_(GENERAL_REGISTERS),
@@ -571,7 +571,7 @@
 BitVector* LAllocator::ComputeLiveOut(HBasicBlock* block) {
   // Compute live out for the given block, except not including backward
   // successor edges.
-  BitVector* live_out = new(zone_) BitVector(next_virtual_register_, zone_);
+  BitVector* live_out = new(zone()) BitVector(next_virtual_register_, zone());
 
   // Process all successor blocks.
   for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
@@ -609,7 +609,7 @@
   while (!iterator.Done()) {
     int operand_index = iterator.Current();
     LiveRange* range = LiveRangeFor(operand_index);
-    range->AddUseInterval(start, end, zone_);
+    range->AddUseInterval(start, end, zone());
     iterator.Advance();
   }
 }
@@ -640,7 +640,7 @@
     TraceAlloc("Fixed reg is tagged at %d\n", pos);
     LInstruction* instr = InstructionAt(pos);
     if (instr->HasPointerMap()) {
-      instr->pointer_map()->RecordPointer(operand, zone());
+      instr->pointer_map()->RecordPointer(operand, chunk()->zone());
     }
   }
   return operand;
@@ -651,9 +651,9 @@
   ASSERT(index < Register::kMaxNumAllocatableRegisters);
   LiveRange* result = fixed_live_ranges_[index];
   if (result == NULL) {
-    result = new(zone_) LiveRange(FixedLiveRangeID(index), zone_);
+    result = new(zone()) LiveRange(FixedLiveRangeID(index), chunk()->zone());
     ASSERT(result->IsFixed());
-    SetLiveRangeAssignedRegister(result, index, GENERAL_REGISTERS, zone_);
+    SetLiveRangeAssignedRegister(result, index, GENERAL_REGISTERS);
     fixed_live_ranges_[index] = result;
   }
   return result;
@@ -664,9 +664,10 @@
   ASSERT(index < DoubleRegister::NumAllocatableRegisters());
   LiveRange* result = fixed_double_live_ranges_[index];
   if (result == NULL) {
-    result = new(zone_) LiveRange(FixedDoubleLiveRangeID(index), zone_);
+    result = new(zone()) LiveRange(FixedDoubleLiveRangeID(index),
+                                   chunk()->zone());
     ASSERT(result->IsFixed());
-    SetLiveRangeAssignedRegister(result, index, DOUBLE_REGISTERS, zone_);
+    SetLiveRangeAssignedRegister(result, index, DOUBLE_REGISTERS);
     fixed_double_live_ranges_[index] = result;
   }
   return result;
@@ -679,7 +680,7 @@
   }
   LiveRange* result = live_ranges_[index];
   if (result == NULL) {
-    result = new(zone_) LiveRange(index, zone_);
+    result = new(zone()) LiveRange(index, chunk()->zone());
     live_ranges_[index] = result;
   }
   return result;
@@ -725,15 +726,15 @@
 
   if (range->IsEmpty() || range->Start().Value() > position.Value()) {
     // Can happen if there is a definition without use.
-    range->AddUseInterval(position, position.NextInstruction(), zone_);
-    range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone_);
+    range->AddUseInterval(position, position.NextInstruction(), zone());
+    range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone());
   } else {
     range->ShortenTo(position);
   }
 
   if (operand->IsUnallocated()) {
     LUnallocated* unalloc_operand = LUnallocated::cast(operand);
-    range->AddUsePosition(position, unalloc_operand, hint, zone_);
+    range->AddUsePosition(position, unalloc_operand, hint, zone());
   }
 }
 
@@ -746,9 +747,9 @@
   if (range == NULL) return;
   if (operand->IsUnallocated()) {
     LUnallocated* unalloc_operand = LUnallocated::cast(operand);
-    range->AddUsePosition(position, unalloc_operand, hint, zone_);
+    range->AddUsePosition(position, unalloc_operand, hint, zone());
   }
-  range->AddUseInterval(block_start, position, zone_);
+  range->AddUseInterval(block_start, position, zone());
 }
 
 
@@ -756,7 +757,8 @@
                                        LOperand* from,
                                        LOperand* to) {
   LGap* gap = GapAt(index);
-  LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, zone());
+  LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
+                                                     chunk()->zone());
   if (from->IsUnallocated()) {
     const ZoneList<LMoveOperands>* move_operands = move->move_operands();
     for (int i = 0; i < move_operands->length(); ++i) {
@@ -765,13 +767,13 @@
       if (cur_to->IsUnallocated()) {
         if (LUnallocated::cast(cur_to)->virtual_register() ==
             LUnallocated::cast(from)->virtual_register()) {
-          move->AddMove(cur.source(), to, zone());
+          move->AddMove(cur.source(), to, chunk()->zone());
           return;
         }
       }
     }
   }
-  move->AddMove(from, to, zone());
+  move->AddMove(from, to, chunk()->zone());
 }
 
 
@@ -811,7 +813,8 @@
     LiveRange* range = LiveRangeFor(first_output->virtual_register());
     bool assigned = false;
     if (first_output->HasFixedPolicy()) {
-      LUnallocated* output_copy = first_output->CopyUnconstrained(zone());
+      LUnallocated* output_copy = first_output->CopyUnconstrained(
+          chunk()->zone());
       bool is_tagged = HasTaggedValue(first_output->virtual_register());
       AllocateFixed(first_output, gap_index, is_tagged);
 
@@ -832,8 +835,10 @@
       // Thus it should be inserted to a lifetime position corresponding to
       // the instruction end.
       LGap* gap = GapAt(gap_index);
-      LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE, zone());
-      move->AddMove(first_output, range->GetSpillOperand(), zone());
+      LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE,
+                                                         chunk()->zone());
+      move->AddMove(first_output, range->GetSpillOperand(),
+                    chunk()->zone());
     }
   }
 
@@ -842,7 +847,8 @@
     for (UseIterator it(second); !it.Done(); it.Advance()) {
       LUnallocated* cur_input = LUnallocated::cast(it.Current());
       if (cur_input->HasFixedPolicy()) {
-        LUnallocated* input_copy = cur_input->CopyUnconstrained(zone());
+        LUnallocated* input_copy = cur_input->CopyUnconstrained(
+            chunk()->zone());
         bool is_tagged = HasTaggedValue(cur_input->virtual_register());
         AllocateFixed(cur_input, gap_index + 1, is_tagged);
         AddConstraintsGapMove(gap_index, input_copy, cur_input);
@@ -851,7 +857,8 @@
         // of the instruction.
         ASSERT(!cur_input->IsUsedAtStart());
 
-        LUnallocated* input_copy = cur_input->CopyUnconstrained(zone());
+        LUnallocated* input_copy = cur_input->CopyUnconstrained(
+            chunk()->zone());
         int vreg = GetVirtualRegister();
         if (!AllocationOk()) return;
         cur_input->set_virtual_register(vreg);
@@ -860,7 +867,7 @@
             DOUBLE_REGISTERS) {
           double_artificial_registers_.Add(
               cur_input->virtual_register() - first_artificial_register_,
-              zone_);
+              zone());
         }
 
         AddConstraintsGapMove(gap_index, input_copy, cur_input);
@@ -876,7 +883,8 @@
       int output_vreg = second_output->virtual_register();
       int input_vreg = cur_input->virtual_register();
 
-      LUnallocated* input_copy = cur_input->CopyUnconstrained(zone());
+      LUnallocated* input_copy = cur_input->CopyUnconstrained(
+          chunk()->zone());
       cur_input->set_virtual_register(second_output->virtual_register());
       AddConstraintsGapMove(gap_index, input_copy, cur_input);
 
@@ -884,7 +892,7 @@
         int index = gap_index + 1;
         LInstruction* instr = InstructionAt(index);
         if (instr->HasPointerMap()) {
-          instr->pointer_map()->RecordPointer(input_copy, zone());
+          instr->pointer_map()->RecordPointer(input_copy, chunk()->zone());
         }
       } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
         // The input is assumed to immediately have a tagged representation,
@@ -913,7 +921,8 @@
     if (IsGapAt(index)) {
       // We have a gap at this position.
       LGap* gap = GapAt(index);
-      LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, zone());
+      LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
+                                                         chunk()->zone());
       const ZoneList<LMoveOperands>* move_operands = move->move_operands();
       for (int i = 0; i < move_operands->length(); ++i) {
         LMoveOperands* cur = &move_operands->at(i);
@@ -965,7 +974,7 @@
               LiveRange* range = FixedLiveRangeFor(i);
               range->AddUseInterval(curr_position,
                                     curr_position.InstructionEnd(),
-                                    zone_);
+                                    zone());
             }
           }
         }
@@ -977,7 +986,7 @@
               LiveRange* range = FixedDoubleLiveRangeFor(i);
               range->AddUseInterval(curr_position,
                                     curr_position.InstructionEnd(),
-                                    zone_);
+                                    zone());
             }
           }
         }
@@ -1025,7 +1034,8 @@
   const ZoneList<HPhi*>* phis = block->phis();
   for (int i = 0; i < phis->length(); ++i) {
     HPhi* phi = phis->at(i);
-    LUnallocated* phi_operand = new(zone_) LUnallocated(LUnallocated::NONE);
+    LUnallocated* phi_operand =
+        new(chunk()->zone()) LUnallocated(LUnallocated::NONE);
     phi_operand->set_virtual_register(phi->id());
     for (int j = 0; j < phi->OperandCount(); ++j) {
       HValue* op = phi->OperandAt(j);
@@ -1035,7 +1045,8 @@
         operand = chunk_->DefineConstantOperand(constant);
       } else {
         ASSERT(!op->EmitAtUses());
-        LUnallocated* unalloc = new(zone_) LUnallocated(LUnallocated::ANY);
+        LUnallocated* unalloc =
+            new(chunk()->zone()) LUnallocated(LUnallocated::ANY);
         unalloc->set_virtual_register(op->id());
         operand = unalloc;
       }
@@ -1058,17 +1069,17 @@
           InstructionAt(cur_block->last_instruction_index());
       if (branch->HasPointerMap()) {
         if (phi->representation().IsTagged() && !phi->type().IsSmi()) {
-          branch->pointer_map()->RecordPointer(phi_operand, zone());
+          branch->pointer_map()->RecordPointer(phi_operand, chunk()->zone());
         } else if (!phi->representation().IsDouble()) {
-          branch->pointer_map()->RecordUntagged(phi_operand, zone());
+          branch->pointer_map()->RecordUntagged(phi_operand, chunk()->zone());
         }
       }
     }
 
     LiveRange* live_range = LiveRangeFor(phi->id());
     LLabel* label = chunk_->GetLabel(phi->block()->block_id());
-    label->GetOrCreateParallelMove(LGap::START, zone())->
-        AddMove(phi_operand, live_range->GetSpillOperand(), zone());
+    label->GetOrCreateParallelMove(LGap::START, chunk()->zone())->
+        AddMove(phi_operand, live_range->GetSpillOperand(), chunk()->zone());
     live_range->SetSpillStartIndex(phi->block()->first_instruction_index());
   }
 }
@@ -1078,12 +1089,11 @@
   ASSERT(chunk_ == NULL);
   chunk_ = static_cast<LPlatformChunk*>(chunk);
   assigned_registers_ =
-      new(zone()) BitVector(Register::NumAllocatableRegisters(), zone());
-  assigned_registers_->Clear();
+      new(chunk->zone()) BitVector(Register::NumAllocatableRegisters(),
+                                   chunk->zone());
   assigned_double_registers_ =
-      new(zone()) BitVector(DoubleRegister::NumAllocatableRegisters(),
-                            zone());
-  assigned_double_registers_->Clear();
+      new(chunk->zone()) BitVector(DoubleRegister::NumAllocatableRegisters(),
+                                   chunk->zone());
   MeetRegisterConstraints();
   if (!AllocationOk()) return false;
   ResolvePhis();
@@ -1093,7 +1103,6 @@
   AllocateDoubleRegisters();
   if (!AllocationOk()) return false;
   PopulatePointerMaps();
-  if (has_osr_entry_) ProcessOsrEntry();
   ConnectRanges();
   ResolveControlFlow();
   return true;
@@ -1101,7 +1110,7 @@
 
 
 void LAllocator::MeetRegisterConstraints() {
-  HPhase phase("L_Register constraints", chunk_);
+  LAllocatorPhase phase("L_Register constraints", this);
   first_artificial_register_ = next_virtual_register_;
   const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
   for (int i = 0; i < blocks->length(); ++i) {
@@ -1113,7 +1122,7 @@
 
 
 void LAllocator::ResolvePhis() {
-  HPhase phase("L_Resolve phis", chunk_);
+  LAllocatorPhase phase("L_Resolve phis", this);
 
   // Process the blocks in reverse order.
   const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
@@ -1149,8 +1158,8 @@
   if (cur_cover->IsSpilled()) return;
   ASSERT(pred_cover != NULL && cur_cover != NULL);
   if (pred_cover != cur_cover) {
-    LOperand* pred_op = pred_cover->CreateAssignedOperand(zone_);
-    LOperand* cur_op = cur_cover->CreateAssignedOperand(zone_);
+    LOperand* pred_op = pred_cover->CreateAssignedOperand(chunk()->zone());
+    LOperand* cur_op = cur_cover->CreateAssignedOperand(chunk()->zone());
     if (!pred_op->Equals(cur_op)) {
       LGap* gap = NULL;
       if (block->predecessors()->length() == 1) {
@@ -1170,7 +1179,7 @@
         LInstruction* branch = InstructionAt(pred->last_instruction_index());
         if (branch->HasPointerMap()) {
           if (HasTaggedValue(range->id())) {
-            branch->pointer_map()->RecordPointer(cur_op, zone());
+            branch->pointer_map()->RecordPointer(cur_op, chunk()->zone());
           } else if (!cur_op->IsDoubleStackSlot() &&
                      !cur_op->IsDoubleRegister()) {
             branch->pointer_map()->RemovePointer(cur_op);
@@ -1178,7 +1187,8 @@
         }
       }
       gap->GetOrCreateParallelMove(
-          LGap::START, zone())->AddMove(pred_op, cur_op, zone());
+          LGap::START, chunk()->zone())->AddMove(pred_op, cur_op,
+                                                 chunk()->zone());
     }
   }
 }
@@ -1189,11 +1199,11 @@
   if (IsGapAt(index)) {
     LGap* gap = GapAt(index);
     return gap->GetOrCreateParallelMove(
-        pos.IsInstructionStart() ? LGap::START : LGap::END, zone());
+        pos.IsInstructionStart() ? LGap::START : LGap::END, chunk()->zone());
   }
   int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
   return GapAt(gap_pos)->GetOrCreateParallelMove(
-      (gap_pos < index) ? LGap::AFTER : LGap::BEFORE, zone());
+      (gap_pos < index) ? LGap::AFTER : LGap::BEFORE, chunk()->zone());
 }
 
 
@@ -1204,7 +1214,7 @@
 
 
 void LAllocator::ConnectRanges() {
-  HPhase phase("L_Connect ranges", this);
+  LAllocatorPhase phase("L_Connect ranges", this);
   for (int i = 0; i < live_ranges()->length(); ++i) {
     LiveRange* first_range = live_ranges()->at(i);
     if (first_range == NULL || first_range->parent() != NULL) continue;
@@ -1223,9 +1233,12 @@
           }
           if (should_insert) {
             LParallelMove* move = GetConnectingParallelMove(pos);
-            LOperand* prev_operand = first_range->CreateAssignedOperand(zone_);
-            LOperand* cur_operand = second_range->CreateAssignedOperand(zone_);
-            move->AddMove(prev_operand, cur_operand, zone());
+            LOperand* prev_operand = first_range->CreateAssignedOperand(
+                chunk()->zone());
+            LOperand* cur_operand = second_range->CreateAssignedOperand(
+                chunk()->zone());
+            move->AddMove(prev_operand, cur_operand,
+                          chunk()->zone());
           }
         }
       }
@@ -1244,7 +1257,7 @@
 
 
 void LAllocator::ResolveControlFlow() {
-  HPhase phase("L_Resolve control flow", this);
+  LAllocatorPhase phase("L_Resolve control flow", this);
   const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
   for (int block_id = 1; block_id < blocks->length(); ++block_id) {
     HBasicBlock* block = blocks->at(block_id);
@@ -1265,7 +1278,7 @@
 
 
 void LAllocator::BuildLiveRanges() {
-  HPhase phase("L_Build live ranges", this);
+  LAllocatorPhase phase("L_Build live ranges", this);
   InitializeLivenessAnalysis();
   // Process the blocks in reverse order.
   const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
@@ -1290,7 +1303,8 @@
       LOperand* hint = NULL;
       LOperand* phi_operand = NULL;
       LGap* gap = GetLastGap(phi->block()->predecessors()->at(0));
-      LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, zone());
+      LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
+                                                         chunk()->zone());
       for (int j = 0; j < move->move_operands()->length(); ++j) {
         LOperand* to = move->move_operands()->at(j).destination();
         if (to->IsUnallocated() &&
@@ -1327,7 +1341,7 @@
       while (!iterator.Done()) {
         int operand_index = iterator.Current();
         LiveRange* range = LiveRangeFor(operand_index);
-        range->EnsureInterval(start, end, zone_);
+        range->EnsureInterval(start, end, zone());
         iterator.Advance();
       }
 
@@ -1377,7 +1391,7 @@
 
 
 void LAllocator::PopulatePointerMaps() {
-  HPhase phase("L_Populate pointer maps", this);
+  LAllocatorPhase phase("L_Populate pointer maps", this);
   const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
 
   ASSERT(SafePointsAreInOrder());
@@ -1448,47 +1462,16 @@
           safe_point >= range->spill_start_index()) {
         TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
                    range->id(), range->spill_start_index(), safe_point);
-        map->RecordPointer(range->GetSpillOperand(), zone());
+        map->RecordPointer(range->GetSpillOperand(), chunk()->zone());
       }
 
       if (!cur->IsSpilled()) {
         TraceAlloc("Pointer in register for range %d (start at %d) "
                    "at safe point %d\n",
                    cur->id(), cur->Start().Value(), safe_point);
-        LOperand* operand = cur->CreateAssignedOperand(zone_);
+        LOperand* operand = cur->CreateAssignedOperand(chunk()->zone());
         ASSERT(!operand->IsStackSlot());
-        map->RecordPointer(operand, zone());
-      }
-    }
-  }
-}
-
-
-void LAllocator::ProcessOsrEntry() {
-  const ZoneList<LInstruction*>* instrs = chunk_->instructions();
-
-  // Linear search for the OSR entry instruction in the chunk.
-  int index = -1;
-  while (++index < instrs->length() &&
-         !instrs->at(index)->IsOsrEntry()) {
-  }
-  ASSERT(index < instrs->length());
-  LOsrEntry* instruction = LOsrEntry::cast(instrs->at(index));
-
-  LifetimePosition position = LifetimePosition::FromInstructionIndex(index);
-  for (int i = 0; i < live_ranges()->length(); ++i) {
-    LiveRange* range = live_ranges()->at(i);
-    if (range != NULL) {
-      if (range->Covers(position) &&
-          range->HasRegisterAssigned() &&
-          range->TopLevel()->HasAllocatedSpillOperand()) {
-        int reg_index = range->assigned_register();
-        LOperand* spill_operand = range->TopLevel()->GetSpillOperand();
-        if (range->IsDouble()) {
-          instruction->MarkSpilledDoubleRegister(reg_index, spill_operand);
-        } else {
-          instruction->MarkSpilledRegister(reg_index, spill_operand);
-        }
+        map->RecordPointer(operand, chunk()->zone());
       }
     }
   }
@@ -1496,14 +1479,14 @@
 
 
 void LAllocator::AllocateGeneralRegisters() {
-  HPhase phase("L_Allocate general registers", this);
+  LAllocatorPhase phase("L_Allocate general registers", this);
   num_registers_ = Register::NumAllocatableRegisters();
   AllocateRegisters();
 }
 
 
 void LAllocator::AllocateDoubleRegisters() {
-  HPhase phase("L_Allocate double registers", this);
+  LAllocatorPhase phase("L_Allocate double registers", this);
   num_registers_ = DoubleRegister::NumAllocatableRegisters();
   mode_ = DOUBLE_REGISTERS;
   AllocateRegisters();
@@ -1829,7 +1812,7 @@
       TraceAlloc("Assigning preferred reg %s to live range %d\n",
                  RegisterName(register_index),
                  current->id());
-      SetLiveRangeAssignedRegister(current, register_index, mode_, zone_);
+      SetLiveRangeAssignedRegister(current, register_index, mode_);
       return true;
     }
   }
@@ -1864,7 +1847,7 @@
   TraceAlloc("Assigning free reg %s to live range %d\n",
              RegisterName(reg),
              current->id());
-  SetLiveRangeAssignedRegister(current, reg, mode_, zone_);
+  SetLiveRangeAssignedRegister(current, reg, mode_);
 
   return true;
 }
@@ -1949,7 +1932,7 @@
   TraceAlloc("Assigning blocked reg %s to live range %d\n",
              RegisterName(reg),
              current->id());
-  SetLiveRangeAssignedRegister(current, reg, mode_, zone_);
+  SetLiveRangeAssignedRegister(current, reg, mode_);
 
   // This register was not free. Thus we need to find and spill
   // parts of active and inactive live regions that use the same register
@@ -2061,7 +2044,7 @@
   int vreg = GetVirtualRegister();
   if (!AllocationOk()) return NULL;
   LiveRange* result = LiveRangeFor(vreg);
-  range->SplitAt(pos, result, zone_);
+  range->SplitAt(pos, result, zone());
   return result;
 }
 
@@ -2169,7 +2152,7 @@
     if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == DOUBLE_REGISTERS);
     first->SetSpillOperand(op);
   }
-  range->MakeSpilled(zone_);
+  range->MakeSpilled(chunk()->zone());
 }
 
 
@@ -2192,4 +2175,32 @@
 #endif
 
 
+LAllocatorPhase::LAllocatorPhase(const char* name, LAllocator* allocator)
+    : CompilationPhase(name, allocator->graph()->info()),
+      allocator_(allocator) {
+  if (FLAG_hydrogen_stats) {
+    allocator_zone_start_allocation_size_ =
+        allocator->zone()->allocation_size();
+  }
+}
+
+
+LAllocatorPhase::~LAllocatorPhase() {
+  if (FLAG_hydrogen_stats) {
+    unsigned size = allocator_->zone()->allocation_size() -
+                    allocator_zone_start_allocation_size_;
+    isolate()->GetHStatistics()->SaveTiming(name(), 0, size);
+  }
+
+  if (ShouldProduceTraceOutput()) {
+    isolate()->GetHTracer()->TraceLithium(name(), allocator_->chunk());
+    isolate()->GetHTracer()->TraceLiveRanges(name(), allocator_);
+  }
+
+#ifdef DEBUG
+  if (allocator_ != NULL) allocator_->Verify();
+#endif
+}
+
+
 } }  // namespace v8::internal
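
The recurring zone_ -> chunk()->zone() substitutions above split allocations by
lifetime: scratch data used only during register allocation stays in the
allocator's own zone, while operands and moves that must outlive allocation go
into the chunk's zone. A minimal sketch of that split, using toy Arena, Chunk,
and Allocator stand-ins rather than the real V8 types:

    #include <cstdio>

    // Toy bump arena standing in for v8::internal::Zone (an assumption:
    // the real Zone differs; only the ownership pattern is shown).
    struct Arena {
      char buf[4096];
      unsigned used = 0;
      void* Alloc(unsigned n) { void* p = buf + used; used += n; return p; }
    };

    struct Chunk {
      Arena zone;  // survives register allocation; owns operands and moves
    };

    struct Allocator {
      explicit Allocator(Chunk* c) : chunk(c) {}
      Arena zone;   // scratch: dies with the allocator
      Chunk* chunk;
    };

    int main() {
      Chunk chunk;
      {
        Allocator alloc(&chunk);
        alloc.zone.Alloc(64);  // temporary live-range bookkeeping
        chunk.zone.Alloc(16);  // an operand the chunk must keep
      }                        // scratch arena gone; chunk data intact
      std::printf("chunk zone used: %u\n", chunk.zone.used);
    }

Like a real Zone, the scratch arena's memory is released wholesale when its
owner goes away, which is why chunk-lifetime data may not live there.
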
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
index 552ebdd..e5edd3c 100644
--- a/src/lithium-allocator.h
+++ b/src/lithium-allocator.h
@@ -434,7 +434,7 @@
   LPlatformChunk* chunk() const { return chunk_; }
   HGraph* graph() const { return graph_; }
   Isolate* isolate() const { return graph_->isolate(); }
-  Zone* zone() const { return zone_; }
+  Zone* zone() { return &zone_; }
 
   int GetVirtualRegister() {
     if (next_virtual_register_ >= LUnallocated::kMaxVirtualRegisters) {
@@ -474,7 +474,6 @@
   void ConnectRanges();
   void ResolveControlFlow();
   void PopulatePointerMaps();
-  void ProcessOsrEntry();
   void AllocateRegisters();
   bool CanEagerlyResolveControlFlow(HBasicBlock* block) const;
   inline bool SafePointsAreInOrder() const;
@@ -571,8 +570,7 @@
 
   inline void SetLiveRangeAssignedRegister(LiveRange* range,
                                            int reg,
-                                           RegisterKind register_kind,
-                                           Zone* zone);
+                                           RegisterKind register_kind);
 
   // Return parallel move that should be used to connect ranges split at the
   // given position.
@@ -599,7 +597,7 @@
 
   inline LGap* GapAt(int index);
 
-  Zone* zone_;
+  Zone zone_;
 
   LPlatformChunk* chunk_;
 
@@ -646,6 +644,19 @@
 };
 
 
+class LAllocatorPhase : public CompilationPhase {
+ public:
+  LAllocatorPhase(const char* name, LAllocator* allocator);
+  ~LAllocatorPhase();
+
+ private:
+  LAllocator* allocator_;
+  unsigned allocator_zone_start_allocation_size_;
+
+  DISALLOW_COPY_AND_ASSIGN(LAllocatorPhase);
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_LITHIUM_ALLOCATOR_H_
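
LAllocatorPhase follows the constructor/destructor protocol implemented above:
snapshot the zone's allocation size on entry, report the delta on exit. A
self-contained sketch of that protocol, where the Zone and the stats sink are
simplified stand-ins for the real V8 classes:

    #include <cstdio>

    class Zone {
     public:
      unsigned allocation_size() const { return allocated_; }
      void Allocate(unsigned n) { allocated_ += n; }  // toy allocator
     private:
      unsigned allocated_ = 0;
    };

    class PhaseScope {
     public:
      PhaseScope(const char* name, Zone* zone)
          : name_(name), zone_(zone),
            start_allocation_size_(zone->allocation_size()) {}
      ~PhaseScope() {
        // Mirrors SaveTiming(name(), 0, size): report what the phase used.
        unsigned size = zone_->allocation_size() - start_allocation_size_;
        std::printf("%s: %u bytes allocated\n", name_, size);
      }
     private:
      const char* name_;
      Zone* zone_;
      unsigned start_allocation_size_;
    };

    int main() {
      Zone zone;
      PhaseScope phase("L_Allocate general registers", &zone);
      zone.Allocate(128);  // work performed inside the phase
    }  // destructor reports 128 bytes on scope exit
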
diff --git a/src/lithium.cc b/src/lithium.cc
index 2993c9a..b22fdf6 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -307,7 +307,7 @@
 }
 
 void LChunk::MarkEmptyBlocks() {
-  HPhase phase("L_Mark empty blocks", this);
+  LPhase phase("L_Mark empty blocks", this);
   for (int i = 0; i < graph()->blocks()->length(); ++i) {
     HBasicBlock* block = graph()->blocks()->at(i);
     int first = block->first_instruction_index();
@@ -491,4 +491,11 @@
 }
 
 
+LPhase::~LPhase() {
+  if (ShouldProduceTraceOutput()) {
+    isolate()->GetHTracer()->TraceLithium(name(), chunk_);
+  }
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/lithium.h b/src/lithium.h
index ab34453..1e0784e 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -533,8 +533,6 @@
         values_(value_count, zone),
         is_tagged_(value_count, zone),
         is_uint32_(value_count, zone),
-        spilled_registers_(NULL),
-        spilled_double_registers_(NULL),
         outer_(outer),
         entry_(entry),
         zone_(zone) { }
@@ -548,10 +546,6 @@
   int translation_size() const { return translation_size_; }
   int parameter_count() const { return parameter_count_; }
   int pc_offset() const { return pc_offset_; }
-  LOperand** spilled_registers() const { return spilled_registers_; }
-  LOperand** spilled_double_registers() const {
-    return spilled_double_registers_;
-  }
   const ZoneList<LOperand*>* values() const { return &values_; }
   LEnvironment* outer() const { return outer_; }
   HEnterInlined* entry() { return entry_; }
@@ -591,12 +585,6 @@
     return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
   }
 
-  void SetSpilledRegisters(LOperand** registers,
-                           LOperand** double_registers) {
-    spilled_registers_ = registers;
-    spilled_double_registers_ = double_registers;
-  }
-
   void PrintTo(StringStream* stream);
 
  private:
@@ -615,13 +603,6 @@
   ZoneList<LOperand*> values_;
   GrowableBitVector is_tagged_;
   GrowableBitVector is_uint32_;
-
-  // Allocation index indexed arrays of spill slot operands for registers
-  // that are also in spill slots at an OSR entry.  NULL for environments
-  // that do not correspond to an OSR entry.
-  LOperand** spilled_registers_;
-  LOperand** spilled_double_registers_;
-
   LEnvironment* outer_;
   HEnterInlined* entry_;
   Zone* zone_;
@@ -778,6 +759,20 @@
 };
 
 
+class LPhase : public CompilationPhase {
+ public:
+  LPhase(const char* name, LChunk* chunk)
+      : CompilationPhase(name, chunk->info()),
+        chunk_(chunk) { }
+  ~LPhase();
+
+ private:
+  LChunk* chunk_;
+
+  DISALLOW_COPY_AND_ASSIGN(LPhase);
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_LITHIUM_H_
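
LPhase is meant to be used as a stack object whose destructor emits the trace
output; DISALLOW_COPY_AND_ASSIGN keeps accidental copies from tracing twice.
A usage sketch with a simplified stand-in class (the deleted copy operations
here approximate what the macro achieves via private declarations):

    #include <cstdio>

    class PhaseLike {
     public:
      explicit PhaseLike(const char* name) : name_(name) {}
      ~PhaseLike() { std::printf("trace output for %s\n", name_); }
      PhaseLike(const PhaseLike&) = delete;             // no copies,
      PhaseLike& operator=(const PhaseLike&) = delete;  // no double traces
     private:
      const char* name_;
    };

    void MarkEmptyBlocks() {
      PhaseLike phase("L_Mark empty blocks");
      // ... phase body runs here ...
    }  // trace emitted exactly once, even on early return

    int main() { MarkEmptyBlocks(); }
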
diff --git a/src/liveedit.cc b/src/liveedit.cc
index a01e502..3ec2da3 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -1832,11 +1832,11 @@
 // Drops all call frames matched by target and all frames above them.
 template<typename TARGET>
 static const char* DropActivationsInActiveThreadImpl(
-    TARGET& target, bool do_drop, Zone* zone) {
+    TARGET& target, bool do_drop) {
   Isolate* isolate = Isolate::Current();
   Debug* debug = isolate->debug();
-  ZoneScope scope(zone, DELETE_ON_EXIT);
-  Vector<StackFrame*> frames = CreateStackMap(isolate, zone);
+  Zone zone(isolate);
+  Vector<StackFrame*> frames = CreateStackMap(isolate, &zone);
 
 
   int top_frame_index = -1;
@@ -1928,12 +1928,11 @@
 // Fills the result array with statuses of functions. Modifies the stack,
 // removing all listed functions if possible and if do_drop is true.
 static const char* DropActivationsInActiveThread(
-    Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop,
-    Zone* zone) {
+    Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop) {
   MultipleFunctionTarget target(shared_info_array, result);
 
   const char* message =
-      DropActivationsInActiveThreadImpl(target, do_drop, zone);
+      DropActivationsInActiveThreadImpl(target, do_drop);
   if (message) {
     return message;
   }
@@ -1980,7 +1979,7 @@
 
 
 Handle<JSArray> LiveEdit::CheckAndDropActivations(
-    Handle<JSArray> shared_info_array, bool do_drop, Zone* zone) {
+    Handle<JSArray> shared_info_array, bool do_drop) {
   Isolate* isolate = shared_info_array->GetIsolate();
   int len = GetArrayLength(shared_info_array);
 
@@ -2006,7 +2005,7 @@
 
   // Try to drop activations from the current stack.
   const char* error_message =
-      DropActivationsInActiveThread(shared_info_array, result, do_drop, zone);
+      DropActivationsInActiveThread(shared_info_array, result, do_drop);
   if (error_message != NULL) {
     // Add error message as an array extra element.
     Vector<const char> vector_message(error_message, StrLength(error_message));
@@ -2047,10 +2046,10 @@
 
 // Finds the required frame and drops it together with all frames above it.
 // Returns an error message or NULL.
-const char* LiveEdit::RestartFrame(JavaScriptFrame* frame, Zone* zone) {
+const char* LiveEdit::RestartFrame(JavaScriptFrame* frame) {
   SingleFrameTarget target(frame);
 
-  const char* result = DropActivationsInActiveThreadImpl(target, true, zone);
+  const char* result = DropActivationsInActiveThreadImpl(target, true);
   if (result != NULL) {
     return result;
   }
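
The liveedit changes above stop threading a caller-supplied Zone* through
every signature; each entry point now owns a stack-allocated Zone whose memory
is released wholesale when the function returns. A sketch of the pattern, with
stand-in Isolate and Zone types rather than the real ones:

    struct Isolate {};

    struct Zone {
      explicit Zone(Isolate* isolate) { (void)isolate; }
      // A real Zone frees all of its memory at once in its destructor.
    };

    // After the change: no Zone* parameter; temporaries die with `zone`.
    const char* DropActivations(Isolate* isolate, bool do_drop) {
      Zone zone(isolate);
      // ... build the stack map inside `zone`, drop frames if do_drop ...
      (void)do_drop;
      return 0;  // NULL means success, matching the functions above
    }

    int main() {
      Isolate isolate;
      return DropActivations(&isolate, false) ? 1 : 0;
    }
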
diff --git a/src/liveedit.h b/src/liveedit.h
index 5b12854..0efbb95 100644
--- a/src/liveedit.h
+++ b/src/liveedit.h
@@ -121,11 +121,11 @@
   // restarts the lowest found frames and drops all other frames above
   // them if possible and if do_drop is true.
   static Handle<JSArray> CheckAndDropActivations(
-      Handle<JSArray> shared_info_array, bool do_drop, Zone* zone);
+      Handle<JSArray> shared_info_array, bool do_drop);
 
   // Restarts the call frame and completely drops all frames above it.
   // Returns an error message or NULL.
-  static const char* RestartFrame(JavaScriptFrame* frame, Zone* zone);
+  static const char* RestartFrame(JavaScriptFrame* frame);
 
   // A copy of this is in liveedit-debugger.js.
   enum FunctionPatchabilityStatus {
diff --git a/src/log-utils.cc b/src/log-utils.cc
index f033172..a733b52 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -237,6 +237,18 @@
 }
 
 
+void LogMessageBuilder::AppendDoubleQuotedString(const char* string) {
+  Append('"');
+  for (const char* p = string; *p != '\0'; p++) {
+    if (*p == '"') {
+      Append('\\');
+    }
+    Append(*p);
+  }
+  Append('"');
+}
+
+
 void LogMessageBuilder::Append(String* str) {
   DisallowHeapAllocation no_gc;  // Ensure string stay valid.
   int length = str->length();
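
AppendDoubleQuotedString centralizes the quoting rule now used by several log
lines: wrap the payload in double quotes and backslash-escape any embedded
quote. A standalone re-implementation of just that rule, for illustration:

    #include <cstdio>
    #include <string>

    std::string DoubleQuote(const char* s) {
      std::string out = "\"";
      for (const char* p = s; *p != '\0'; p++) {
        if (*p == '"') out += '\\';  // escape embedded quotes
        out += *p;
      }
      out += '"';
      return out;
    }

    int main() {
      // Prints: "a \"quoted\" name"
      std::printf("%s\n", DoubleQuote("a \"quoted\" name").c_str());
    }
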
diff --git a/src/log-utils.h b/src/log-utils.h
index a1867f2..c499540 100644
--- a/src/log-utils.h
+++ b/src/log-utils.h
@@ -132,6 +132,9 @@
   // Append a character to the log message.
   void Append(const char c);
 
+  // Append a double-quoted string to the log message.
+  void AppendDoubleQuotedString(const char* string);
+
   // Append a heap string.
   void Append(String* str);
 
diff --git a/src/log.cc b/src/log.cc
index 908a368..82ce886 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -862,7 +862,7 @@
                                    Address entry_point) {
   if (!log_->IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg(this);
-  msg.Append("%s,%s,-3,",
+  msg.Append("%s,%s,-2,",
              kLogEventsNames[CODE_CREATION_EVENT],
              kLogEventsNames[CALLBACK_TAG]);
   msg.AppendAddress(entry_point);
@@ -903,18 +903,39 @@
 }
 
 
-void Logger::CodeCreateEvent(LogEventsAndTags tag,
-                             Code* code,
-                             const char* comment) {
-  if (!is_logging_code_events()) return;
-  if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
+void Logger::AppendName(Name* name) {
+  if (name->IsString()) {
+    name_buffer_->AppendString(String::cast(name));
+  } else {
+    Symbol* symbol = Symbol::cast(name);
+    name_buffer_->AppendBytes("symbol(");
+    if (!symbol->name()->IsUndefined()) {
+      name_buffer_->AppendBytes("\"");
+      name_buffer_->AppendString(String::cast(symbol->name()));
+      name_buffer_->AppendBytes("\" ");
+    }
+    name_buffer_->AppendBytes("hash ");
+    name_buffer_->AppendHex(symbol->Hash());
+    name_buffer_->AppendByte(')');
+  }
+}
+
+
+void Logger::InitNameBuffer(LogEventsAndTags tag) {
     name_buffer_->Reset();
     name_buffer_->AppendBytes(kLogEventsNames[tag]);
     name_buffer_->AppendByte(':');
-    name_buffer_->AppendBytes(comment);
-  }
+}
+
+
+void Logger::LogRecordedBuffer(Code* code, SharedFunctionInfo* shared) {
   if (code_event_handler_ != NULL) {
-    IssueCodeAddedEvent(code, NULL, name_buffer_->get(), name_buffer_->size());
+    Script* script = shared && shared->script()->IsScript() ?
+        Script::cast(shared->script()) : NULL;
+    IssueCodeAddedEvent(code,
+                        script,
+                        name_buffer_->get(),
+                        name_buffer_->size());
   }
   if (!log_->IsEnabled()) return;
   if (FLAG_ll_prof) {
@@ -923,21 +944,49 @@
   if (Serializer::enabled()) {
     RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
   }
-  if (!FLAG_log_code) return;
-  LogMessageBuilder msg(this);
-  msg.Append("%s,%s,%d,",
-             kLogEventsNames[CODE_CREATION_EVENT],
-             kLogEventsNames[tag],
-             code->kind());
-  msg.AppendAddress(code->address());
-  msg.Append(",%d,\"", code->ExecutableSize());
-  for (const char* p = comment; *p != '\0'; p++) {
-    if (*p == '"') {
-      msg.Append('\\');
+}
+
+
+void Logger::AppendCodeCreateHeader(LogMessageBuilder* msg,
+                                    LogEventsAndTags tag,
+                                    Code* code) {
+  ASSERT(msg);
+  msg->Append("%s,%s,%d,",
+              kLogEventsNames[CODE_CREATION_EVENT],
+              kLogEventsNames[tag],
+              code->kind());
+  msg->AppendAddress(code->address());
+  msg->Append(",%d,", code->ExecutableSize());
+}
+
+
+void Logger::AppendSymbolName(LogMessageBuilder* msg,
+                              Symbol* symbol) {
+    ASSERT(symbol);
+    msg->Append("symbol(");
+    if (!symbol->name()->IsUndefined()) {
+      msg->Append("\"");
+      msg->AppendDetailed(String::cast(symbol->name()), false);
+      msg->Append("\" ");
     }
-    msg.Append(*p);
+    msg->Append("hash %x)", symbol->Hash());
+}
+
+
+void Logger::CodeCreateEvent(LogEventsAndTags tag,
+                             Code* code,
+                             const char* comment) {
+  if (!is_logging_code_events()) return;
+  if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
+    InitNameBuffer(tag);
+    name_buffer_->AppendBytes(comment);
+    LogRecordedBuffer(code, NULL);
   }
-  msg.Append('"');
+
+  if (!FLAG_log_code || !log_->IsEnabled()) return;
+  LogMessageBuilder msg(this);
+  AppendCodeCreateHeader(&msg, tag, code);
+  msg.AppendDoubleQuotedString(comment);
   msg.Append('\n');
   msg.WriteToLogFile();
 }
@@ -948,55 +997,20 @@
                              Name* name) {
   if (!is_logging_code_events()) return;
   if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
-    name_buffer_->Reset();
-    name_buffer_->AppendBytes(kLogEventsNames[tag]);
-    name_buffer_->AppendByte(':');
-    if (name->IsString()) {
-      name_buffer_->AppendString(String::cast(name));
-    } else {
-      Symbol* symbol = Symbol::cast(name);
-      name_buffer_->AppendBytes("symbol(");
-      if (!symbol->name()->IsUndefined()) {
-        name_buffer_->AppendBytes("\"");
-        name_buffer_->AppendString(String::cast(symbol->name()));
-        name_buffer_->AppendBytes("\" ");
-      }
-      name_buffer_->AppendBytes("hash ");
-      name_buffer_->AppendHex(symbol->Hash());
-      name_buffer_->AppendByte(')');
-    }
+    InitNameBuffer(tag);
+    AppendName(name);
+    LogRecordedBuffer(code, NULL);
   }
-  if (code_event_handler_ != NULL) {
-    IssueCodeAddedEvent(code, NULL, name_buffer_->get(), name_buffer_->size());
-  }
-  if (!log_->IsEnabled()) return;
-  if (FLAG_ll_prof) {
-    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
-  }
-  if (Serializer::enabled()) {
-    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
-  }
-  if (!FLAG_log_code) return;
+
+  if (!FLAG_log_code || !log_->IsEnabled()) return;
   LogMessageBuilder msg(this);
-  msg.Append("%s,%s,%d,",
-             kLogEventsNames[CODE_CREATION_EVENT],
-             kLogEventsNames[tag],
-             code->kind());
-  msg.AppendAddress(code->address());
-  msg.Append(",%d,", code->ExecutableSize());
+  AppendCodeCreateHeader(&msg, tag, code);
   if (name->IsString()) {
     msg.Append('"');
     msg.AppendDetailed(String::cast(name), false);
     msg.Append('"');
   } else {
-    Symbol* symbol = Symbol::cast(name);
-    msg.Append("symbol(");
-    if (!symbol->name()->IsUndefined()) {
-      msg.Append("\"");
-      msg.AppendDetailed(String::cast(symbol->name()), false);
-      msg.Append("\" ");
-    }
-    msg.Append("hash %x)", symbol->Hash());
+    AppendSymbolName(&msg, Symbol::cast(name));
   }
   msg.Append('\n');
   msg.WriteToLogFile();
@@ -1020,65 +1034,25 @@
                              Name* name) {
   if (!is_logging_code_events()) return;
   if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
-    name_buffer_->Reset();
-    name_buffer_->AppendBytes(kLogEventsNames[tag]);
-    name_buffer_->AppendByte(':');
+    InitNameBuffer(tag);
     name_buffer_->AppendBytes(ComputeMarker(code));
-    if (name->IsString()) {
-      name_buffer_->AppendString(String::cast(name));
-    } else {
-      Symbol* symbol = Symbol::cast(name);
-      name_buffer_->AppendBytes("symbol(");
-      if (!symbol->name()->IsUndefined()) {
-        name_buffer_->AppendBytes("\"");
-        name_buffer_->AppendString(String::cast(symbol->name()));
-        name_buffer_->AppendBytes("\" ");
-      }
-      name_buffer_->AppendBytes("hash ");
-      name_buffer_->AppendHex(symbol->Hash());
-      name_buffer_->AppendByte(')');
-    }
+    AppendName(name);
+    LogRecordedBuffer(code, shared);
   }
-  if (code_event_handler_ != NULL) {
-    Script* script =
-        shared->script()->IsScript() ? Script::cast(shared->script()) : NULL;
-    IssueCodeAddedEvent(code,
-                        script,
-                        name_buffer_->get(),
-                        name_buffer_->size());
-  }
-  if (!log_->IsEnabled()) return;
-  if (FLAG_ll_prof) {
-    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
-  }
-  if (Serializer::enabled()) {
-    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
-  }
-  if (!FLAG_log_code) return;
+
+  if (!FLAG_log_code || !log_->IsEnabled()) return;
   if (code == Isolate::Current()->builtins()->builtin(
       Builtins::kLazyCompile))
     return;
 
   LogMessageBuilder msg(this);
-  msg.Append("%s,%s,%d,",
-             kLogEventsNames[CODE_CREATION_EVENT],
-             kLogEventsNames[tag],
-             code->kind());
-  msg.AppendAddress(code->address());
-  msg.Append(",%d,", code->ExecutableSize());
+  AppendCodeCreateHeader(&msg, tag, code);
   if (name->IsString()) {
     SmartArrayPointer<char> str =
         String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
     msg.Append("\"%s\"", *str);
   } else {
-    Symbol* symbol = Symbol::cast(name);
-    msg.Append("symbol(");
-    if (!symbol->name()->IsUndefined()) {
-      msg.Append("\"");
-      msg.AppendDetailed(String::cast(symbol->name()), false);
-      msg.Append("\" ");
-    }
-    msg.Append("hash %x)", symbol->Hash());
+    AppendSymbolName(&msg, Symbol::cast(name));
   }
   msg.Append(',');
   msg.AppendAddress(shared->address());
@@ -1098,9 +1072,7 @@
                              Name* source, int line) {
   if (!is_logging_code_events()) return;
   if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
-    name_buffer_->Reset();
-    name_buffer_->AppendBytes(kLogEventsNames[tag]);
-    name_buffer_->AppendByte(':');
+    InitNameBuffer(tag);
     name_buffer_->AppendBytes(ComputeMarker(code));
     name_buffer_->AppendString(shared->DebugName());
     name_buffer_->AppendByte(' ');
@@ -1113,45 +1085,21 @@
     }
     name_buffer_->AppendByte(':');
     name_buffer_->AppendInt(line);
+    LogRecordedBuffer(code, shared);
   }
-  if (code_event_handler_ != NULL) {
-    Script* script =
-        shared->script()->IsScript() ? Script::cast(shared->script()) : NULL;
-    IssueCodeAddedEvent(code,
-                        script,
-                        name_buffer_->get(),
-                        name_buffer_->size());
-  }
-  if (!log_->IsEnabled()) return;
-  if (FLAG_ll_prof) {
-    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
-  }
-  if (Serializer::enabled()) {
-    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
-  }
-  if (!FLAG_log_code) return;
+
+  if (!FLAG_log_code || !log_->IsEnabled()) return;
   LogMessageBuilder msg(this);
+  AppendCodeCreateHeader(&msg, tag, code);
   SmartArrayPointer<char> name =
       shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-  msg.Append("%s,%s,%d,",
-             kLogEventsNames[CODE_CREATION_EVENT],
-             kLogEventsNames[tag],
-             code->kind());
-  msg.AppendAddress(code->address());
-  msg.Append(",%d,\"%s ", code->ExecutableSize(), *name);
+  msg.Append("\"%s ", *name);
   if (source->IsString()) {
     SmartArrayPointer<char> sourcestr =
        String::cast(source)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
     msg.Append("%s", *sourcestr);
   } else {
-    Symbol* symbol = Symbol::cast(source);
-    msg.Append("symbol(");
-    if (!symbol->name()->IsUndefined()) {
-      msg.Append("\"");
-      msg.AppendDetailed(String::cast(symbol->name()), false);
-      msg.Append("\" ");
-    }
-    msg.Append("hash %x)", symbol->Hash());
+    AppendSymbolName(&msg, Symbol::cast(source));
   }
   msg.Append(":%d\",", line);
   msg.AppendAddress(shared->address());
@@ -1164,29 +1112,15 @@
 void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
   if (!is_logging_code_events()) return;
   if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
-    name_buffer_->Reset();
-    name_buffer_->AppendBytes(kLogEventsNames[tag]);
-    name_buffer_->AppendByte(':');
+    InitNameBuffer(tag);
     name_buffer_->AppendInt(args_count);
+    LogRecordedBuffer(code, NULL);
   }
-  if (code_event_handler_ != NULL) {
-    IssueCodeAddedEvent(code, NULL, name_buffer_->get(), name_buffer_->size());
-  }
-  if (!log_->IsEnabled()) return;
-  if (FLAG_ll_prof) {
-    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
-  }
-  if (Serializer::enabled()) {
-    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
-  }
-  if (!FLAG_log_code) return;
+
+  if (!FLAG_log_code || !log_->IsEnabled()) return;
   LogMessageBuilder msg(this);
-  msg.Append("%s,%s,%d,",
-             kLogEventsNames[CODE_CREATION_EVENT],
-             kLogEventsNames[tag],
-             code->kind());
-  msg.AppendAddress(code->address());
-  msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
+  AppendCodeCreateHeader(&msg, tag, code);
+  msg.Append("\"args_count: %d\"", args_count);
   msg.Append('\n');
   msg.WriteToLogFile();
 }
@@ -1202,30 +1136,17 @@
 void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
   if (!is_logging_code_events()) return;
   if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
-    name_buffer_->Reset();
-    name_buffer_->AppendBytes(kLogEventsNames[REG_EXP_TAG]);
-    name_buffer_->AppendByte(':');
+    InitNameBuffer(REG_EXP_TAG);
     name_buffer_->AppendString(source);
+    LogRecordedBuffer(code, NULL);
   }
-  if (code_event_handler_ != NULL) {
-    IssueCodeAddedEvent(code, NULL, name_buffer_->get(), name_buffer_->size());
-  }
-  if (!log_->IsEnabled()) return;
-  if (FLAG_ll_prof) {
-    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
-  }
-  if (Serializer::enabled()) {
-    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
-  }
-  if (!FLAG_log_code) return;
+
+  if (!FLAG_log_code || !log_->IsEnabled()) return;
   LogMessageBuilder msg(this);
-  msg.Append("%s,%s,-2,",
-             kLogEventsNames[CODE_CREATION_EVENT],
-             kLogEventsNames[REG_EXP_TAG]);
-  msg.AppendAddress(code->address());
-  msg.Append(",%d,\"", code->ExecutableSize());
+  AppendCodeCreateHeader(&msg, REG_EXP_TAG, code);
+  msg.Append('"');
   msg.AppendDetailed(source, false);
-  msg.Append('\"');
+  msg.Append('"');
   msg.Append('\n');
   msg.WriteToLogFile();
 }
@@ -1294,12 +1215,9 @@
     const char* code_name = address_to_name_map_->Lookup(addr);
     if (code_name == NULL) return;  // Not a code object.
     LogMessageBuilder msg(this);
-    msg.Append("%s,%d,\"", kLogEventsNames[SNAPSHOT_CODE_NAME_EVENT], pos);
-    for (const char* p = code_name; *p != '\0'; ++p) {
-      if (*p == '"') msg.Append('\\');
-      msg.Append(*p);
-    }
-    msg.Append("\"\n");
+    msg.Append("%s,%d,", kLogEventsNames[SNAPSHOT_CODE_NAME_EVENT], pos);
+    msg.AppendDoubleQuotedString(code_name);
+    msg.Append("\n");
     msg.WriteToLogFile();
   }
   if (!FLAG_log_snapshot_positions) return;
@@ -1371,14 +1289,7 @@
     msg.Append(String::cast(name));
     msg.Append('"');
   } else {
-    Symbol* symbol = Symbol::cast(name);
-    msg.Append("symbol(");
-    if (!symbol->name()->IsUndefined()) {
-      msg.Append("\"");
-      msg.AppendDetailed(String::cast(symbol->name()), false);
-      msg.Append("\" ");
-    }
-    msg.Append("hash %x)", symbol->Hash());
+    AppendSymbolName(&msg, Symbol::cast(name));
   }
   msg.Append('\n');
   msg.WriteToLogFile();
@@ -1600,6 +1511,10 @@
         description = "A stub from the snapshot";
       tag = Logger::STUB_TAG;
       break;
+    case Code::REGEXP:
+      description = "Regular expression code";
+      tag = Logger::REG_EXP_TAG;
+      break;
     case Code::BUILTIN:
       description = "A builtin from the snapshot";
       tag = Logger::BUILTIN_TAG;
@@ -1628,6 +1543,8 @@
       description = "A keyed call IC from the snapshot";
       tag = Logger::KEYED_CALL_IC_TAG;
       break;
+    case Code::NUMBER_OF_KINDS:
+      break;
   }
   PROFILE(isolate_, CodeCreateEvent(tag, code_object, description));
 }
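
After this refactor every CodeCreateEvent overload shares one shape: build the
low-level name buffer (InitNameBuffer, payload, LogRecordedBuffer), then, when
--log-code is on, emit the textual line starting with AppendCodeCreateHeader.
A runnable sketch of that control flow with stand-in types, not the real API:

    #include <cstdio>
    #include <string>

    struct Code {};
    struct SharedFunctionInfo {};

    static std::string name_buffer;  // plays the role of name_buffer_

    void InitNameBuffer(const char* tag) {
      name_buffer = std::string(tag) + ":";  // reset, then "tag:"
    }

    void LogRecordedBuffer(Code*, SharedFunctionInfo*) {
      // Would fan out to the code-event handler, ll_prof, and the
      // snapshot name registry; here it just shows the handoff.
      std::printf("recorded: %s\n", name_buffer.c_str());
    }

    void CodeCreateEvent(const char* tag, Code* code, const char* comment) {
      InitNameBuffer(tag);         // step 1: shared prefix
      name_buffer += comment;      // step 2: overload-specific payload
      LogRecordedBuffer(code, 0);  // step 3: shared handoff
      // step 4 (--log-code): AppendCodeCreateHeader(...) + quoted payload
    }

    int main() {
      Code code;
      CodeCreateEvent("code-creation", &code, "A stub");
    }
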
diff --git a/src/log.h b/src/log.h
index 531f1de..f6e2ed9 100644
--- a/src/log.h
+++ b/src/log.h
@@ -413,6 +413,21 @@
   // Used for logging stubs found in the snapshot.
   void LogCodeObject(Object* code_object);
 
+  // Helper method. Resets name_buffer_ and appends the tag name to it.
+  void InitNameBuffer(LogEventsAndTags tag);
+
+  // Helper method. Pushes the recorded buffer to the registered handlers.
+  void LogRecordedBuffer(Code*, SharedFunctionInfo*);
+
+  // Helper method. Dumps the given name into name_buffer_.
+  void AppendName(Name* name);
+
+  // Appends the standard code creation header.
+  void AppendCodeCreateHeader(LogMessageBuilder*, LogEventsAndTags, Code*);
+
+  // Appends the symbol's description (name and hash) to the message.
+  void AppendSymbolName(LogMessageBuilder*, Symbol*);
+
   // Emits general information about generated code.
   void LogCodeInfo();
 
diff --git a/src/macros.py b/src/macros.py
index 643d6c7..e442b44 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -117,6 +117,7 @@
 macro IS_ARGUMENTS(arg)         = (%_ClassOf(arg) === 'Arguments');
 macro IS_GLOBAL(arg)            = (%_ClassOf(arg) === 'global');
 macro IS_ARRAYBUFFER(arg)       = (%_ClassOf(arg) === 'ArrayBuffer');
+macro IS_DATAVIEW(arg)          = (%_ClassOf(arg) === 'DataView');
 macro IS_GENERATOR(arg)         = (%_ClassOf(arg) === 'Generator');
 macro IS_UNDETECTABLE(arg)      = (%_IsUndetectableObject(arg));
 macro FLOOR(arg)                = $floor(arg);
@@ -144,7 +145,6 @@
 macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
 macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
 macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToInteger(ToNumber(arg)));
-macro TO_POSITIVE_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? (arg > 0 ? arg : 0) : %NumberToPositiveInteger(ToNumber(arg)));
 macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
 macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
 macro TO_UINT32(arg) = (arg >>> 0);
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index c06bfd7..3940b56 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1319,7 +1319,7 @@
   InstanceType type = map->instance_type();
   if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
 
-  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
+  Object* second = reinterpret_cast<ConsString*>(object)->second();
   Heap* heap = map->GetHeap();
   if (second != heap->empty_string()) {
     return object;
@@ -1328,7 +1328,7 @@
   // Since we don't have the object's start, it is impossible to update the
   // page dirty marks. Therefore, we only replace the string with its left
   // substring when page dirty marks do not change.
-  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
+  Object* first = reinterpret_cast<ConsString*>(object)->first();
   if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
 
   *p = first;
@@ -1713,23 +1713,6 @@
     MarkCompactMarkingVisitor::non_count_table_;
 
 
-class MarkingVisitor : public ObjectVisitor {
- public:
-  explicit MarkingVisitor(Heap* heap) : heap_(heap) { }
-
-  void VisitPointer(Object** p) {
-    MarkCompactMarkingVisitor::VisitPointer(heap_, p);
-  }
-
-  void VisitPointers(Object** start, Object** end) {
-    MarkCompactMarkingVisitor::VisitPointers(heap_, start, end);
-  }
-
- private:
-  Heap* heap_;
-};
-
-
 class CodeMarkingVisitor : public ThreadVisitor {
  public:
   explicit CodeMarkingVisitor(MarkCompactCollector* collector)
@@ -2038,14 +2021,13 @@
 }
 
 
-void MarkCompactCollector::MarkStringTable() {
+void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
   StringTable* string_table = heap()->string_table();
   // Mark the string table itself.
   MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
   SetMark(string_table, string_table_mark);
   // Explicitly mark the prefix.
-  MarkingVisitor marker(heap());
-  string_table->IteratePrefix(&marker);
+  string_table->IteratePrefix(visitor);
   ProcessMarkingDeque();
 }
 
@@ -2056,7 +2038,7 @@
   heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
 
   // Handle the string table specially.
-  MarkStringTable();
+  MarkStringTable(visitor);
 
   // There may be overflowed objects in the heap.  Visit them now.
   while (marking_deque_.overflowed()) {
@@ -2269,14 +2251,7 @@
       while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
         ASSERT(cell->IsPropertyCell());
         if (IsMarked(cell)) {
-          int offset = PropertyCell::kValueOffset;
-          MarkCompactMarkingVisitor::VisitPointer(
-              heap(),
-              reinterpret_cast<Object**>(cell->address() + offset));
-          offset = PropertyCell::kTypeOffset;
-          MarkCompactMarkingVisitor::VisitPointer(
-              heap(),
-              reinterpret_cast<Object**>(cell->address() + offset));
+          MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
         }
       }
     }
@@ -2416,7 +2391,6 @@
   for (HeapObject* obj = map_iterator.Next();
        obj != NULL;
        obj = map_iterator.Next()) {
-    if (obj->IsFreeSpace()) continue;
     Map* map = Map::cast(obj);
 
     STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
@@ -2430,21 +2404,19 @@
 
 
 void MarkCompactCollector::ClearNonLiveReferences() {
-  HeapObjectIterator map_iterator(heap()->map_space());
   // Iterate over the map space, setting map transitions that go from
   // a marked map to an unmarked map to null transitions.  This action
   // is carried out only on maps of JSObjects and related subtypes.
+  HeapObjectIterator map_iterator(heap()->map_space());
   for (HeapObject* obj = map_iterator.Next();
-       obj != NULL; obj = map_iterator.Next()) {
-    Map* map = reinterpret_cast<Map*>(obj);
-    MarkBit map_mark = Marking::MarkBitFrom(map);
-    if (map->IsFreeSpace()) continue;
+       obj != NULL;
+       obj = map_iterator.Next()) {
+    Map* map = Map::cast(obj);
 
-    ASSERT(map->IsMap());
     if (!map->CanTransition()) continue;
 
-    if (map_mark.Get() &&
-        map->attached_to_shared_function_info()) {
+    MarkBit map_mark = Marking::MarkBitFrom(map);
+    if (map_mark.Get() && map->attached_to_shared_function_info()) {
       // This map is used for inobject slack tracking and has been detached
       // from SharedFunctionInfo during the mark phase.
       // Since it survived the GC, reattach it now.
@@ -2455,11 +2427,22 @@
     ClearNonLiveMapTransitions(map, map_mark);
 
     if (map_mark.Get()) {
-      ClearNonLiveDependentCode(map);
+      ClearNonLiveDependentCode(map->dependent_code());
     } else {
       ClearAndDeoptimizeDependentCode(map);
     }
   }
+
+  // Iterate over property cell space, removing dependent code that is not
+  // otherwise kept alive by strong references.
+  HeapObjectIterator cell_iterator(heap_->property_cell_space());
+  for (HeapObject* cell = cell_iterator.Next();
+       cell != NULL;
+       cell = cell_iterator.Next()) {
+    if (IsMarked(cell)) {
+      ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
+    }
+  }
 }
 
 
@@ -2545,9 +2528,8 @@
 }
 
 
-void MarkCompactCollector::ClearNonLiveDependentCode(Map* map) {
+void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
   DisallowHeapAllocation no_allocation;
-  DependentCode* entries = map->dependent_code();
   DependentCode::GroupStartIndexes starts(entries);
   int number_of_entries = starts.number_of_entries();
   if (number_of_entries == 0) return;
@@ -3291,11 +3273,9 @@
   bool code_slots_filtering_required;
   { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
     code_slots_filtering_required = MarkInvalidatedCode();
-
     EvacuateNewSpace();
   }
 
-
   { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
     EvacuatePages();
   }
@@ -3418,9 +3398,7 @@
        cell != NULL;
        cell = cell_iterator.Next()) {
     if (cell->IsCell()) {
-      Address value_address = reinterpret_cast<Address>(cell) +
-          (Cell::kValueOffset - kHeapObjectTag);
-      updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
+      Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
     }
   }
 
@@ -3430,14 +3408,7 @@
        cell != NULL;
        cell = js_global_property_cell_iterator.Next()) {
     if (cell->IsPropertyCell()) {
-      Address value_address =
-          reinterpret_cast<Address>(cell) +
-          (PropertyCell::kValueOffset - kHeapObjectTag);
-      updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
-      Address type_address =
-          reinterpret_cast<Address>(cell) +
-          (PropertyCell::kTypeOffset - kHeapObjectTag);
-      updating_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
+      PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
     }
   }
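
The update-pointers hunks above replace hand-computed "address + kValueOffset"
arithmetic with the cells' BodyDescriptor, which knows every pointer slot in
the object body. A toy version of that idea; the struct and visitor below are
illustrative stand-ins, not V8's layout machinery:

    #include <cstdio>

    struct Object {};

    struct Visitor {
      void VisitPointer(Object** slot) {
        std::printf("visiting slot %p\n", static_cast<void*>(slot));
      }
    };

    struct PropertyCellLike {
      Object* value;  // previously reached via kValueOffset
      Object* type;   // previously reached via kTypeOffset

      // Descriptor-style iteration: enumerate every pointer slot once,
      // so callers no longer repeat the offset arithmetic.
      template <typename V>
      void IterateBody(V* v) {
        v->VisitPointer(&value);
        v->VisitPointer(&type);
      }
    };

    int main() {
      PropertyCellLike cell = { 0, 0 };
      Visitor v;
      cell.IterateBody(&v);
    }
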
 
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 873534c..db188ed 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -835,7 +835,7 @@
 
   // Mark the string table specially.  References to internalized strings from
   // the string table are weak.
-  void MarkStringTable();
+  void MarkStringTable(RootMarkingVisitor* visitor);
 
   // Mark objects in implicit references groups if their parent object
   // is marked.
@@ -880,7 +880,7 @@
   void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
 
   void ClearAndDeoptimizeDependentCode(Map* map);
-  void ClearNonLiveDependentCode(Map* map);
+  void ClearNonLiveDependentCode(DependentCode* dependent_code);
 
   // Marking detaches initial maps from SharedFunctionInfo objects
   // to make this reference weak. We need to reattach initial maps
diff --git a/src/messages.js b/src/messages.js
index ce075ce..137d98f 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -108,14 +108,23 @@
                                  ["%0"," constructor should have at least one argument."],
   not_typed_array:               ["this is not a typed array."],
   invalid_argument:              ["invalid_argument"],
+  data_view_not_array_buffer:    ["First argument to DataView constructor must be an ArrayBuffer"],
+  constructor_not_function:      ["Constructor ", "%0", " requires 'new'"],
   // RangeError
   invalid_array_length:          ["Invalid array length"],
   invalid_array_buffer_length:   ["Invalid array buffer length"],
-  invalid_typed_array_offset:    ["Start offset is too large"],
-  invalid_typed_array_length:    ["Length is too large"],
+  invalid_typed_array_offset:    ["Start offset is too large:"],
+  invalid_typed_array_length:    ["Invalid typed array length"],
   invalid_typed_array_alignment: ["%0", "of", "%1", "should be a multiple of", "%3"],
   typed_array_set_source_too_large:
                                  ["Source is too large"],
+  typed_array_set_negative_offset:
+                                 ["Start offset is negative"],
+  invalid_data_view_offset:      ["Start offset is outside the bounds of the buffer"],
+  invalid_data_view_length:      ["Invalid data view length"],
+  invalid_data_view_accessor_offset:
+                                 ["Offset is outside the bounds of the DataView"],
+
   stack_overflow:                ["Maximum call stack size exceeded"],
   invalid_time_value:            ["Invalid time value"],
   // SyntaxError
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index f518f05..2ca0083 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -202,6 +202,7 @@
 
 void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  ASSERT(!target->IsConsString());
   Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
   if (mode == UPDATE_WRITE_BARRIER &&
       host() != NULL &&
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 06273ca..e9188ef 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -108,372 +108,6 @@
 }
 
 
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. An elements backing store is allocated with size initial_capacity
-// and filled with the hole values.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
-                                 Register array_function,
-                                 Register result,
-                                 Register scratch1,
-                                 Register scratch2,
-                                 Register scratch3,
-                                 Label* gc_required) {
-  const int initial_capacity = JSArray::kPreallocatedArrayElements;
-  STATIC_ASSERT(initial_capacity >= 0);
-  __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
-
-  // Allocate the JSArray object together with space for a fixed array with the
-  // requested elements.
-  int size = JSArray::kSize;
-  if (initial_capacity > 0) {
-    size += FixedArray::SizeFor(initial_capacity);
-  }
-  __ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT);
-
-  // Allocated the JSArray. Now initialize the fields except for the elements
-  // array.
-  // result: JSObject
-  // scratch1: initial map
-  // scratch2: start of next object
-  __ sw(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
-  __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
-  __ sw(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
-  // Field JSArray::kElementsOffset is initialized later.
-  __ mov(scratch3,  zero_reg);
-  __ sw(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
-
-  if (initial_capacity == 0) {
-    __ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
-    return;
-  }
-
-  // Calculate the location of the elements array and set elements array member
-  // of the JSArray.
-  // result: JSObject
-  // scratch2: start of next object
-  __ Addu(scratch1, result, Operand(JSArray::kSize));
-  __ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
-
-  // Clear the heap tag on the elements array.
-  __ And(scratch1, scratch1, Operand(~kHeapObjectTagMask));
-
-  // Initialize the FixedArray and fill it with holes. FixedArray length is
-  // stored as a smi.
-  // result: JSObject
-  // scratch1: elements array (untagged)
-  // scratch2: start of next object
-  __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
-  STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
-  __ sw(scratch3, MemOperand(scratch1));
-  __ Addu(scratch1, scratch1, kPointerSize);
-  __ li(scratch3,  Operand(Smi::FromInt(initial_capacity)));
-  STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
-  __ sw(scratch3, MemOperand(scratch1));
-  __ Addu(scratch1, scratch1, kPointerSize);
-
-  // Fill the FixedArray with the hole value. Inline the code if short.
-  STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
-  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
-  static const int kLoopUnfoldLimit = 4;
-  if (initial_capacity <= kLoopUnfoldLimit) {
-    for (int i = 0; i < initial_capacity; i++) {
-      __ sw(scratch3, MemOperand(scratch1, i * kPointerSize));
-    }
-  } else {
-    Label loop, entry;
-    __ Addu(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
-    __ Branch(&entry);
-    __ bind(&loop);
-    __ sw(scratch3, MemOperand(scratch1));
-    __ Addu(scratch1, scratch1, kPointerSize);
-    __ bind(&entry);
-    __ Branch(&loop, lt, scratch1, Operand(scratch2));
-  }
-}
-
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array_storage and elements_array_end
-// (see  below for when that is not the case). If the parameter fill_with_holes
-// is true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array_storage is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
-                            Register array_function,  // Array function.
-                            Register array_size,  // As a smi, cannot be 0.
-                            Register result,
-                            Register elements_array_storage,
-                            Register elements_array_end,
-                            Register scratch1,
-                            Register scratch2,
-                            bool fill_with_hole,
-                            Label* gc_required) {
-  // Load the initial map from the array function.
-  __ LoadInitialArrayMap(array_function, scratch2,
-                         elements_array_storage, fill_with_hole);
-
-  if (FLAG_debug_code) {  // Assert that array size is not zero.
-    __ Assert(
-        ne, "array size is unexpectedly 0", array_size, Operand(zero_reg));
-  }
-
-  // Allocate the JSArray object together with space for a FixedArray with the
-  // requested number of elements.
-  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  __ li(elements_array_end,
-        (JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize);
-  __ sra(scratch1, array_size, kSmiTagSize);
-  __ Addu(elements_array_end, elements_array_end, scratch1);
-  __ Allocate(elements_array_end,
-              result,
-              scratch1,
-              scratch2,
-              gc_required,
-              static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
-  // Allocated the JSArray. Now initialize the fields except for the elements
-  // array.
-  // result: JSObject
-  // elements_array_storage: initial map
-  // array_size: size of array (smi)
-  __ sw(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
-  __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
-  __ sw(elements_array_storage,
-         FieldMemOperand(result, JSArray::kPropertiesOffset));
-  // Field JSArray::kElementsOffset is initialized later.
-  __ sw(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
-
-  // Calculate the location of the elements array and set elements array member
-  // of the JSArray.
-  // result: JSObject
-  // array_size: size of array (smi)
-  __ Addu(elements_array_storage, result, Operand(JSArray::kSize));
-  __ sw(elements_array_storage,
-         FieldMemOperand(result, JSArray::kElementsOffset));
-
-  // Clear the heap tag on the elements array.
-  __ And(elements_array_storage,
-          elements_array_storage,
-          Operand(~kHeapObjectTagMask));
-  // Initialize the fixed array and fill it with holes. FixedArray length is
-  // stored as a smi.
-  // result: JSObject
-  // elements_array_storage: elements array (untagged)
-  // array_size: size of array (smi)
-  __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
-  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
-  __ sw(scratch1, MemOperand(elements_array_storage));
-  __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
-
-  // Length of the FixedArray is the number of pre-allocated elements if
-  // the actual JSArray has length 0 and the size of the JSArray for non-empty
-  // JSArrays. The length of a FixedArray is stored as a smi.
-  STATIC_ASSERT(kSmiTag == 0);
-
-  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
-  __ sw(array_size, MemOperand(elements_array_storage));
-  __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
-
-  // Calculate elements array and elements array end.
-  // result: JSObject
-  // elements_array_storage: elements array element storage
-  // array_size: smi-tagged size of elements array
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ sll(elements_array_end, array_size, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(elements_array_end, elements_array_storage, elements_array_end);
-
-  // Fill the allocated FixedArray with the hole value if requested.
-  // result: JSObject
-  // elements_array_storage: elements array element storage
-  // elements_array_end: start of next object
-  if (fill_with_hole) {
-    Label loop, entry;
-    __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
-    __ Branch(&entry);
-    __ bind(&loop);
-    __ sw(scratch1, MemOperand(elements_array_storage));
-    __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
-
-    __ bind(&entry);
-    __ Branch(&loop, lt, elements_array_storage, Operand(elements_array_end));
-  }
-}
-
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-//   a0: argc
-//   a1: constructor (built-in Array function)
-//   ra: return address
-//   sp[0]: last argument
-// This function is used for both construct and normal calls of Array. The only
-// difference between handling a construct call and a normal call is that for a
-// construct call the constructor function in a1 needs to be preserved for
-// entering the generic code. In both cases argc in a0 needs to be preserved.
-// Both registers are preserved by this code so no need to differentiate between
-// construct call and normal call.
-void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
-  Counters* counters = masm->isolate()->counters();
-  Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
-      has_non_smi_element, finish, cant_transition_map, not_double;
-
-  // Check for array construction with zero arguments or one.
-  __ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg));
-  // Handle construction of an empty array.
-  __ bind(&empty_array);
-  AllocateEmptyJSArray(masm,
-                       a1,
-                       a2,
-                       a3,
-                       t0,
-                       t1,
-                       call_generic_code);
-  __ IncrementCounter(counters->array_function_native(), 1, a3, t0);
-  // Set up return value, remove receiver from stack and return.
-  __ Addu(sp, sp, Operand(kPointerSize));
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a2);
-
-  // Check for one argument. Bail out if argument is not smi or if it is
-  // negative.
-  __ bind(&argc_one_or_more);
-  __ Branch(&argc_two_or_more, ne, a0, Operand(1));
-
-  STATIC_ASSERT(kSmiTag == 0);
-  __ lw(a2, MemOperand(sp));  // Get the argument from the stack.
-  __ Branch(&not_empty_array, ne, a2, Operand(zero_reg));
-  __ Drop(1);  // Adjust stack.
-  __ mov(a0, zero_reg);  // Treat this as a call with argc of zero.
-  __ Branch(&empty_array);
-
-  __ bind(&not_empty_array);
-  __ And(a3, a2, Operand(kIntptrSignBit | kSmiTagMask));
-  __ Branch(call_generic_code, eq, a3, Operand(zero_reg));
-
-  // Handle construction of an empty array of a certain size. Bail out if size
-  // is too large to actually allocate an elements array.
-  STATIC_ASSERT(kSmiTag == 0);
-  __ Branch(call_generic_code, Ugreater_equal, a2,
-            Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
-
-  // a0: argc
-  // a1: constructor
-  // a2: array_size (smi)
-  // sp[0]: argument
-  AllocateJSArray(masm,
-                  a1,
-                  a2,
-                  a3,
-                  t0,
-                  t1,
-                  t2,
-                  t3,
-                  true,
-                  call_generic_code);
-  __ IncrementCounter(counters->array_function_native(), 1, a2, t0);
-
-  // Set up return value, remove receiver and argument from stack and return.
-  __ Addu(sp, sp, Operand(2 * kPointerSize));
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a3);
-
-  // Handle construction of an array from a list of arguments.
-  __ bind(&argc_two_or_more);
-  __ sll(a2, a0, kSmiTagSize);  // Convert argc to a smi.
-
-  // a0: argc
-  // a1: constructor
-  // a2: array_size (smi)
-  // sp[0]: last argument
-  AllocateJSArray(masm,
-                  a1,
-                  a2,
-                  a3,
-                  t0,
-                  t1,
-                  t2,
-                  t3,
-                  false,
-                  call_generic_code);
-  __ IncrementCounter(counters->array_function_native(), 1, a2, t2);
-
-  // Fill arguments as array elements. Copy from the top of the stack (last
-  // element) to the array backing store filling it backwards. Note:
-  // elements_array_end points after the backing store.
-  // a0: argc
-  // a3: JSArray
-  // t0: elements_array storage start (untagged)
-  // t1: elements_array_end (untagged)
-  // sp[0]: last argument
-
-  Label loop, entry;
-  __ Branch(USE_DELAY_SLOT, &entry);
-  __ mov(t3, sp);
-  __ bind(&loop);
-  __ lw(a2, MemOperand(t3));
-  if (FLAG_smi_only_arrays) {
-    __ JumpIfNotSmi(a2, &has_non_smi_element);
-  }
-  __ Addu(t3, t3, kPointerSize);
-  __ Addu(t1, t1, -kPointerSize);
-  __ sw(a2, MemOperand(t1));
-  __ bind(&entry);
-  __ Branch(&loop, lt, t0, Operand(t1));
-
-  __ bind(&finish);
-  __ mov(sp, t3);
-
-  // Remove the caller arguments and receiver from the stack, set up the
-  // return value, and return.
-  // a0: argc
-  // a3: JSArray
-  // sp[0]: receiver
-  __ Addu(sp, sp, Operand(kPointerSize));
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a3);
-
-  __ bind(&has_non_smi_element);
-  // Double values are handled by the runtime.
-  __ CheckMap(
-      a2, t5, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
-  __ bind(&cant_transition_map);
-  __ UndoAllocationInNewSpace(a3, t0);
-  __ Branch(call_generic_code);
-
-  __ bind(&not_double);
-  // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
-  // a3: JSArray
-  __ lw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         a2,
-                                         t5,
-                                         &cant_transition_map);
-  __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
-  __ RecordWriteField(a3,
-                      HeapObject::kMapOffset,
-                      a2,
-                      t5,
-                      kRAHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  Label loop2;
-  __ bind(&loop2);
-  __ lw(a2, MemOperand(t3));
-  __ Addu(t3, t3, kPointerSize);
-  __ Subu(t1, t1, kPointerSize);
-  __ sw(a2, MemOperand(t1));
-  __ Branch(&loop2, lt, t0, Operand(t1));
-  __ Branch(&finish);
-}
-
-
 void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0     : number of arguments
@@ -498,20 +132,9 @@
 
   // Run the native code for the InternalArray function called as a normal
   // function.
-  if (FLAG_optimize_constructed_arrays) {
-    // Tail call a stub.
-    InternalArrayConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else {
-    ArrayNativeCode(masm, &generic_array_code);
-
-    // Jump to the generic array code if the specialized code cannot handle the
-    // construction.
-    __ bind(&generic_array_code);
-    Handle<Code> array_code =
-        masm->isolate()->builtins()->InternalArrayCodeGeneric();
-    __ Jump(array_code, RelocInfo::CODE_TARGET);
-  }
+  // Tail call a stub.
+  InternalArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
 }
 
 
@@ -538,58 +161,13 @@
   }
 
   // Run the native code for the Array function called as a normal function.
-  if (FLAG_optimize_constructed_arrays) {
-    // Tail call a stub.
-    Handle<Object> undefined_sentinel(
-        masm->isolate()->heap()->undefined_value(),
-        masm->isolate());
-    __ li(a2, Operand(undefined_sentinel));
-    ArrayConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else {
-    ArrayNativeCode(masm, &generic_array_code);
-
-    // Jump to the generic array code if the specialized code cannot handle
-    // the construction.
-    __ bind(&generic_array_code);
-    Handle<Code> array_code =
-        masm->isolate()->builtins()->ArrayCodeGeneric();
-    __ Jump(array_code, RelocInfo::CODE_TARGET);
-  }
-}
-
-
-void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a0     : number of arguments
-  //  -- a1     : constructor function
-  //  -- a2     : type info cell
-  //  -- ra     : return address
-  //  -- sp[...]: constructor arguments
-  // -----------------------------------
-
-  if (FLAG_debug_code) {
-    // The array construct code is only set for the builtin and internal
-    // Array functions which always have a map.
-    // Initial map for the builtin Array function should be a map.
-    __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
-    __ And(t0, a3, Operand(kSmiTagMask));
-    __ Assert(ne, "Unexpected initial map for Array function (3)",
-              t0, Operand(zero_reg));
-    __ GetObjectType(a3, a3, t0);
-    __ Assert(eq, "Unexpected initial map for Array function (4)",
-              t0, Operand(MAP_TYPE));
-  }
-  Label generic_constructor;
-  // Run the native code for the Array function called as a constructor.
-  ArrayNativeCode(masm, &generic_constructor);
-
-  // Jump to the generic construct code in case the specialized code cannot
-  // handle the construction.
-  __ bind(&generic_constructor);
-  Handle<Code> generic_construct_stub =
-      masm->isolate()->builtins()->JSConstructStubGeneric();
-  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+  // Tail call a stub.
+  Handle<Object> undefined_sentinel(
+      masm->isolate()->heap()->undefined_value(),
+      masm->isolate());
+  __ li(a2, Operand(undefined_sentinel));
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
 }
 
 
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index cb59779..2da8fc0 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -1181,12 +1181,17 @@
 
     // Now that we have the types, we might as well check for
     // internalized-internalized.
-    // Ensure that no non-strings have the internalized bit set.
-    STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsInternalizedMask);
+    Label not_internalized;
     STATIC_ASSERT(kInternalizedTag != 0);
-    __ And(t2, a2, Operand(a3));
-    __ And(t0, t2, Operand(kIsInternalizedMask));
-    __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
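+    // An internalized string has both the string tag and the internalized
+    // bit set, so one masked compare checks both properties at once.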
+    __ And(t2, a2, Operand(kIsNotStringMask | kIsInternalizedMask));
+    __ Branch(&not_internalized, ne, t2,
+        Operand(kInternalizedTag | kStringTag));
+
+    __ And(a3, a3, Operand(kIsNotStringMask | kIsInternalizedMask));
+    __ Branch(&return_not_equal, eq, a3,
+        Operand(kInternalizedTag | kStringTag));
+
+    __ bind(&not_internalized);
 }
 
 
@@ -1220,8 +1225,7 @@
   ASSERT((lhs.is(a0) && rhs.is(a1)) ||
          (lhs.is(a1) && rhs.is(a0)));
 
-  // a2 is object type of lhs.
-  // Ensure that no non-strings have the internalized bit set.
+  // a2 is object type of rhs.
   Label object_test;
   STATIC_ASSERT(kInternalizedTag != 0);
   __ And(at, a2, Operand(kIsNotStringMask));
@@ -2326,7 +2330,7 @@
 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
   Label right_arg_changed, call_runtime;
 
-  if (op_ == Token::MOD && has_fixed_right_arg_) {
+  if (op_ == Token::MOD && encoded_right_arg_.has_value) {
     // It is guaranteed that the value will fit into a Smi, because if it
     // didn't, we wouldn't be here, see BinaryOp_Patch.
     __ Branch(&right_arg_changed,
@@ -2541,7 +2545,7 @@
         // to type transition.
 
       } else {
-        if (has_fixed_right_arg_) {
+        if (encoded_right_arg_.has_value) {
           __ Move(f16, fixed_right_arg_value());
           __ BranchF(&transition, NULL, ne, f14, f16);
         }
@@ -3339,9 +3343,7 @@
   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
   RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
-  if (FLAG_optimize_constructed_arrays) {
-    ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
-  }
+  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
 }
 
 
@@ -5018,55 +5020,12 @@
 }
 
 
-static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
-  // Cache the called function in a global property cell.  Cache states
-  // are uninitialized, monomorphic (indicated by a JSFunction), and
-  // megamorphic.
-  // a1 : the function to call
-  // a2 : cache cell for call target
-  Label done;
-
-  ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
-            masm->isolate()->heap()->undefined_value());
-  ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
-            masm->isolate()->heap()->the_hole_value());
-
-  // Load the cache state into a3.
-  __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
-
-  // A monomorphic cache hit or an already megamorphic state: invoke the
-  // function without changing the state.
-  __ Branch(&done, eq, a3, Operand(a1));
-  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-  __ Branch(&done, eq, a3, Operand(at));
-
-  // A monomorphic miss (i.e., the cache is not uninitialized here) goes
-  // megamorphic.
-  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-
-  __ Branch(USE_DELAY_SLOT, &done, eq, a3, Operand(at));
-  // An uninitialized cache is patched with the function.
-  // Store a1 in the delay slot. This may or may not get overwritten depending
-  // on the result of the comparison.
-  __ sw(a1, FieldMemOperand(a2, Cell::kValueOffset));
-  // No need for a write barrier here - cells are rescanned.
-
-  // MegamorphicSentinel is an immortal immovable object (undefined) so no
-  // write-barrier is needed.
-  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-  __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
-
-  __ bind(&done);
-}
-
-
 static void GenerateRecordCallTarget(MacroAssembler* masm) {
   // Cache the called function in a global property cell.  Cache states
   // are uninitialized, monomorphic (indicated by a JSFunction), and
   // megamorphic.
   // a1 : the function to call
   // a2 : cache cell for call target
-  ASSERT(FLAG_optimize_constructed_arrays);
   Label initialize, done, miss, megamorphic, not_array_function;
 
   ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
@@ -5166,11 +5125,7 @@
   __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
 
   if (RecordCallTarget()) {
-    if (FLAG_optimize_constructed_arrays) {
-      GenerateRecordCallTarget(masm);
-    } else {
-      GenerateRecordCallTargetNoArray(masm);
-    }
+    GenerateRecordCallTarget(masm);
   }
 
   // Fast-case: Invoke the function now.
@@ -5244,15 +5199,11 @@
   __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
 
   if (RecordCallTarget()) {
-    if (FLAG_optimize_constructed_arrays) {
-      GenerateRecordCallTarget(masm);
-    } else {
-      GenerateRecordCallTargetNoArray(masm);
-    }
+    GenerateRecordCallTarget(masm);
   }
 
   // Jump to the function-specific construct stub.
-  Register jmp_reg = FLAG_optimize_constructed_arrays ? a3 : a2;
+  Register jmp_reg = a3;
   __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
   __ lw(jmp_reg, FieldMemOperand(jmp_reg,
                                  SharedFunctionInfo::kConstructStubOffset));
@@ -6665,9 +6616,13 @@
   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
   STATIC_ASSERT(kInternalizedTag != 0);
-  __ And(tmp1, tmp1, Operand(tmp2));
-  __ And(tmp1, tmp1, kIsInternalizedMask);
-  __ Branch(&miss, eq, tmp1, Operand(zero_reg));
+
+  __ And(tmp1, tmp1, Operand(kIsNotStringMask | kIsInternalizedMask));
+  __ Branch(&miss, ne, tmp1, Operand(kInternalizedTag | kStringTag));
+
+  __ And(tmp2, tmp2, Operand(kIsNotStringMask | kIsInternalizedMask));
+  __ Branch(&miss, ne, tmp2, Operand(kInternalizedTag | kStringTag));
+
   // Make sure a0 is non-zero. At this point input operands are
   // guaranteed to be non-zero.
   ASSERT(right.is(a0));
@@ -6707,17 +6662,8 @@
   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
 
-  Label succeed1;
-  __ And(at, tmp1, Operand(kIsInternalizedMask));
-  __ Branch(&succeed1, ne, at, Operand(zero_reg));
-  __ Branch(&miss, ne, tmp1, Operand(SYMBOL_TYPE));
-  __ bind(&succeed1);
-
-  Label succeed2;
-  __ And(at, tmp2, Operand(kIsInternalizedMask));
-  __ Branch(&succeed2, ne, at, Operand(zero_reg));
-  __ Branch(&miss, ne, tmp2, Operand(SYMBOL_TYPE));
-  __ bind(&succeed2);
+  __ JumpIfNotUniqueName(tmp1, &miss);
+  __ JumpIfNotUniqueName(tmp2, &miss);
 
   // Use a0 as result
   __ mov(v0, a0);
@@ -6780,7 +6726,8 @@
   // Handle not identical strings.
 
   // Check that both strings are internalized strings. If they are, we're done
-  // because we already know they are not identical.
+  // because we already know they are not identical. We know they are both
+  // strings.
   if (equality) {
     ASSERT(GetCondition() == eq);
     STATIC_ASSERT(kInternalizedTag != 0);
@@ -6985,10 +6932,7 @@
     __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
     __ lbu(entity_name,
            FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
-    __ And(scratch0, entity_name, Operand(kIsInternalizedMask));
-    __ Branch(&good, ne, scratch0, Operand(zero_reg));
-    __ Branch(miss, ne, entity_name, Operand(SYMBOL_TYPE));
-
+    __ JumpIfNotUniqueName(entity_name, miss);
     __ bind(&good);
 
     // Restore the properties.
@@ -7162,14 +7106,10 @@
 
     if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
       // Check if the entry name is not a unique name.
-      Label cont;
       __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
       __ lbu(entry_key,
              FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
-      __ And(result, entry_key, Operand(kIsInternalizedMask));
-      __ Branch(&cont, ne, result, Operand(zero_reg));
-      __ Branch(&maybe_in_dictionary, ne, entry_key, Operand(SYMBOL_TYPE));
-      __ bind(&cont);
+      __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
     }
   }
 
@@ -7773,50 +7713,37 @@
     __ bind(&okay_here);
   }
 
-  if (FLAG_optimize_constructed_arrays) {
-    Label no_info, switch_ready;
-    // Get the elements kind and case on that.
-    __ Branch(&no_info, eq, a2, Operand(undefined_sentinel));
-    __ lw(a3, FieldMemOperand(a2, PropertyCell::kValueOffset));
-    __ JumpIfNotSmi(a3, &no_info);
-    __ SmiUntag(a3);
-    __ jmp(&switch_ready);
-    __ bind(&no_info);
-    __ li(a3, Operand(GetInitialFastElementsKind()));
-    __ bind(&switch_ready);
+  Label no_info, switch_ready;
+  // Get the elements kind and case on that.
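+  // a2 holds the type feedback cell, or undefined_sentinel when no feedback
+  // was recorded; in that case fall back to the initial fast elements kind.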
+  __ Branch(&no_info, eq, a2, Operand(undefined_sentinel));
+  __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
+  __ JumpIfNotSmi(a3, &no_info);
+  __ SmiUntag(a3);
+  __ jmp(&switch_ready);
+  __ bind(&no_info);
+  __ li(a3, Operand(GetInitialFastElementsKind()));
+  __ bind(&switch_ready);
 
-    if (argument_count_ == ANY) {
-      Label not_zero_case, not_one_case;
-      __ And(at, a0, a0);
-      __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
-      CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+  if (argument_count_ == ANY) {
+    Label not_zero_case, not_one_case;
+    __ And(at, a0, a0);
+    __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
+    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
 
-      __ bind(&not_zero_case);
-      __ Branch(&not_one_case, gt, a0, Operand(1));
-      CreateArrayDispatchOneArgument(masm);
+    __ bind(&not_zero_case);
+    __ Branch(&not_one_case, gt, a0, Operand(1));
+    CreateArrayDispatchOneArgument(masm);
 
-      __ bind(&not_one_case);
-      CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
-    } else if (argument_count_ == NONE) {
-      CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
-    } else if (argument_count_ == ONE) {
-      CreateArrayDispatchOneArgument(masm);
-    } else if (argument_count_ == MORE_THAN_ONE) {
-      CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
-    } else {
-     UNREACHABLE();
-    }
+    __ bind(&not_one_case);
+    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
+  } else if (argument_count_ == NONE) {
+    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+  } else if (argument_count_ == ONE) {
+    CreateArrayDispatchOneArgument(masm);
+  } else if (argument_count_ == MORE_THAN_ONE) {
+    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
   } else {
-     Label generic_constructor;
-     // Run the native code for the Array function called as a constructor.
-     ArrayNativeCode(masm, &generic_constructor);
-
-     // Jump to the generic construct code in case the specialized code cannot
-     // handle the construction.
-     __ bind(&generic_constructor);
-     Handle<Code> generic_construct_stub =
-         masm->isolate()->builtins()->JSConstructStubGeneric();
-     __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+    UNREACHABLE();
   }
 }
 
@@ -7877,43 +7804,30 @@
         t0, Operand(MAP_TYPE));
   }
 
-  if (FLAG_optimize_constructed_arrays) {
-    // Figure out the right elements kind.
-    __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+  // Figure out the right elements kind.
+  __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
 
-    // Load the map's "bit field 2" into a3. We only need the first byte,
-    // but the following bit field extraction takes care of that anyway.
-    __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
-    // Retrieve elements_kind from bit field 2.
-    __ Ext(a3, a3, Map::kElementsKindShift, Map::kElementsKindBitCount);
+  // Load the map's "bit field 2" into a3. We only need the first byte,
+  // but the following bit field extraction takes care of that anyway.
+  __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
+  // Retrieve elements_kind from bit field 2.
+  __ Ext(a3, a3, Map::kElementsKindShift, Map::kElementsKindBitCount);
 
-    if (FLAG_debug_code) {
-      Label done;
-      __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
-      __ Assert(
-          eq, "Invalid ElementsKind for InternalArray or InternalPackedArray",
-          a3, Operand(FAST_HOLEY_ELEMENTS));
-      __ bind(&done);
-    }
-
-    Label fast_elements_case;
-    __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
-    GenerateCase(masm, FAST_HOLEY_ELEMENTS);
-
-    __ bind(&fast_elements_case);
-    GenerateCase(masm, FAST_ELEMENTS);
-  } else {
-    Label generic_constructor;
-    // Run the native code for the Array function called as constructor.
-    ArrayNativeCode(masm, &generic_constructor);
-
-    // Jump to the generic construct code in case the specialized code cannot
-    // handle the construction.
-    __ bind(&generic_constructor);
-    Handle<Code> generic_construct_stub =
-        masm->isolate()->builtins()->JSConstructStubGeneric();
-    __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+  if (FLAG_debug_code) {
+    Label done;
+    __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
+    __ Assert(
+        eq, "Invalid ElementsKind for InternalArray or InternalPackedArray",
+        a3, Operand(FAST_HOLEY_ELEMENTS));
+    __ bind(&done);
   }
+
+  Label fast_elements_case;
+  __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
+  GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+  __ bind(&fast_elements_case);
+  GenerateCase(masm, FAST_ELEMENTS);
 }
 
 
diff --git a/src/mips/frames-mips.cc b/src/mips/frames-mips.cc
index 540caa9..d070b56 100644
--- a/src/mips/frames-mips.cc
+++ b/src/mips/frames-mips.cc
@@ -33,20 +33,12 @@
 #include "assembler.h"
 #include "assembler-mips.h"
 #include "assembler-mips-inl.h"
-#include "frames-inl.h"
-#include "mips/assembler-mips-inl.h"
-#include "macro-assembler.h"
-#include "macro-assembler-mips.h"
+#include "frames.h"
 
 namespace v8 {
 namespace internal {
 
 
-Address ExitFrame::ComputeStackPointer(Address fp) {
-  return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
-
-
 Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
 Register JavaScriptFrame::context_register() { return cp; }
 
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index df83019..d416a3b 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -1694,11 +1694,11 @@
         ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
         // Fall through.
       case ObjectLiteral::Property::COMPUTED:
-        if (key->handle()->IsInternalizedString()) {
+        if (key->value()->IsInternalizedString()) {
           if (property->emit_store()) {
             VisitForAccumulatorValue(value);
             __ mov(a0, result_register());
-            __ li(a2, Operand(key->handle()));
+            __ li(a2, Operand(key->value()));
             __ lw(a1, MemOperand(sp));
             Handle<Code> ic = is_classic_mode()
                 ? isolate()->builtins()->StoreIC_Initialize()
@@ -1834,10 +1834,7 @@
     Expression* subexpr = subexprs->at(i);
     // If the subexpression is a literal or a simple materialized literal, it
     // is already set in the cloned array.
-    if (subexpr->AsLiteral() != NULL ||
-        CompileTimeValue::IsCompileTimeValue(subexpr)) {
-      continue;
-    }
+    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
 
     if (!result_saved) {
       __ push(v0);  // array literal
@@ -2001,14 +1998,28 @@
       __ push(result_register());
       // Fall through.
     case Yield::INITIAL: {
-      VisitForStackValue(expr->generator_object());
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-      __ lw(context_register(),
-            MemOperand(fp, StandardFrameConstants::kContextOffset));
+      Label suspend, continuation, post_runtime, resume;
 
-      Label resume;
-      __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-      __ Branch(&resume, ne, result_register(), Operand(at));
+      __ jmp(&suspend);
+
+      __ bind(&continuation);
+      __ jmp(&resume);
+
+      __ bind(&suspend);
+      VisitForAccumulatorValue(expr->generator_object());
+      ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+      __ li(a1, Operand(Smi::FromInt(continuation.pos())));
+      __ sw(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset));
+      __ sw(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset));
+      __ mov(a1, cp);
+      __ RecordWriteField(v0, JSGeneratorObject::kContextOffset, a1, a2,
+                          kRAHasBeenSaved, kDontSaveFPRegs);
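+      // If sp still points at the base of the expression stack, there are no
+      // operands to save, so the runtime call can be skipped.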
+      __ Addu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+      __ Branch(&post_runtime, eq, sp, Operand(a1));
+      __ push(v0);  // generator object
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+      __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+      __ bind(&post_runtime);
       __ pop(result_register());
       EmitReturnSequence();
 
@@ -2036,7 +2047,8 @@
       // [sp + 1 * kPointerSize] iter
       // [sp + 0 * kPointerSize] g
 
-      Label l_catch, l_try, l_resume, l_next, l_call, l_loop;
+      Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+      Label l_next, l_call, l_loop;
       // Initial send value is undefined.
       __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
       __ Branch(&l_next);
@@ -2059,14 +2071,23 @@
       __ PushTryHandler(StackHandler::CATCH, expr->index());
       const int handler_size = StackHandlerConstants::kSize;
       __ push(a0);                                       // result
-      __ lw(a3, MemOperand(sp, (0 + 1) * kPointerSize + handler_size));  // g
-      __ push(a3);                                       // g
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+      __ jmp(&l_suspend);
+      __ bind(&l_continuation);
       __ mov(a0, v0);
-      __ lw(context_register(),
-            MemOperand(fp, StandardFrameConstants::kContextOffset));
-      __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-      __ Branch(&l_resume, ne, a0, Operand(at));
+      __ jmp(&l_resume);
+      __ bind(&l_suspend);
+      const int generator_object_depth = kPointerSize + handler_size;
+      __ lw(a0, MemOperand(sp, generator_object_depth));
+      __ push(a0);                                       // g
+      ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+      __ li(a1, Operand(Smi::FromInt(l_continuation.pos())));
+      __ sw(a1, FieldMemOperand(a0, JSGeneratorObject::kContinuationOffset));
+      __ sw(cp, FieldMemOperand(a0, JSGeneratorObject::kContextOffset));
+      __ mov(a1, cp);
+      __ RecordWriteField(a0, JSGeneratorObject::kContextOffset, a1, a2,
+                          kRAHasBeenSaved, kDontSaveFPRegs);
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+      __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
       __ pop(v0);                                        // result
       EmitReturnSequence();
       __ mov(a0, v0);
@@ -2142,7 +2163,7 @@
   __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
   Label push_argument_holes, push_frame;
   __ bind(&push_argument_holes);
-  __ Subu(a3, a3, Operand(1));
+  __ Subu(a3, a3, Operand(Smi::FromInt(1)));
   __ Branch(&push_frame, lt, a3, Operand(zero_reg));
   __ push(a2);
   __ jmp(&push_argument_holes);
@@ -2247,7 +2268,7 @@
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
   __ mov(a0, result_register());
-  __ li(a2, Operand(key->handle()));
+  __ li(a2, Operand(key->value()));
   // Call load IC. It has arguments receiver and property name a0 and a2.
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
   CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
@@ -2409,7 +2430,7 @@
       VisitForAccumulatorValue(prop->obj());
       __ mov(a1, result_register());
       __ pop(a0);  // Restore value.
-      __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
+      __ li(a2, Operand(prop->key()->AsLiteral()->value()));
       Handle<Code> ic = is_classic_mode()
           ? isolate()->builtins()->StoreIC_Initialize()
           : isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2540,7 +2561,7 @@
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
   __ mov(a0, result_register());  // Load the value.
-  __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
+  __ li(a2, Operand(prop->key()->AsLiteral()->value()));
   __ pop(a1);
 
   Handle<Code> ic = is_classic_mode()
@@ -2818,7 +2839,7 @@
     }
     if (property->key()->IsPropertyName()) {
       EmitCallWithIC(expr,
-                     property->key()->AsLiteral()->handle(),
+                     property->key()->AsLiteral()->value(),
                      RelocInfo::CODE_TARGET);
     } else {
       EmitKeyedCallWithIC(expr, property->key());
@@ -3423,7 +3444,7 @@
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   ASSERT_NE(NULL, args->at(1)->AsLiteral());
-  Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+  Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
 
@@ -3855,7 +3876,7 @@
   ASSERT_EQ(2, args->length());
 
   ASSERT_NE(NULL, args->at(0)->AsLiteral());
-  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
 
   Handle<FixedArray> jsfunction_result_caches(
       isolate()->native_context()->jsfunction_result_caches());
@@ -4530,7 +4551,7 @@
       break;
     case NAMED_PROPERTY: {
       __ mov(a0, result_register());  // Value.
-      __ li(a2, Operand(prop->key()->AsLiteral()->handle()));  // Name.
+      __ li(a2, Operand(prop->key()->AsLiteral()->value()));  // Name.
       __ pop(a1);  // Receiver.
       Handle<Code> ic = is_classic_mode()
           ? isolate()->builtins()->StoreIC_Initialize()
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index b30ee31..8a00f60 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -326,7 +326,8 @@
   __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
   __ Branch(index_string, eq, at, Operand(zero_reg));
 
-  // Is the string internalized?
+  // Is the string internalized? We know it's a string, so a single
+  // bit test is enough.
   // map: key map
   __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
   STATIC_ASSERT(kInternalizedTag != 0);
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 9649eb9..6751be7 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -62,7 +62,7 @@
 #define __ masm()->
 
 bool LCodeGen::GenerateCode() {
-  HPhase phase("Z_Code generation", chunk());
+  LPhase phase("Z_Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;
 
@@ -87,7 +87,7 @@
     RegisterDependentCodeForEmbeddedMaps(code);
   }
   PopulateDeoptimizationData(code);
-  info()->CommitDependentMaps(code);
+  info()->CommitDependencies(code);
 }
 
 
@@ -243,7 +243,6 @@
   if (FLAG_trace && info()->IsOptimizing()) {
     __ CallRuntime(Runtime::kTraceEnter, 0);
   }
-  EnsureSpaceForLazyDeopt();
   return !is_aborted();
 }
 
@@ -271,6 +270,7 @@
 
     instr->CompileToNative(this);
   }
+  EnsureSpaceForLazyDeopt();
   return !is_aborted();
 }
 
@@ -603,27 +603,6 @@
 
   for (int i = 0; i < translation_size; ++i) {
     LOperand* value = environment->values()->at(i);
-    // spilled_registers_ and spilled_double_registers_ are either
-    // both NULL or both set.
-    if (environment->spilled_registers() != NULL && value != NULL) {
-      if (value->IsRegister() &&
-          environment->spilled_registers()[value->index()] != NULL) {
-        translation->MarkDuplicate();
-        AddToTranslation(translation,
-                         environment->spilled_registers()[value->index()],
-                         environment->HasTaggedValueAt(i),
-                         environment->HasUint32ValueAt(i));
-      } else if (
-          value->IsDoubleRegister() &&
-          environment->spilled_double_registers()[value->index()] != NULL) {
-        translation->MarkDuplicate();
-        AddToTranslation(
-            translation,
-            environment->spilled_double_registers()[value->index()],
-            false,
-            false);
-      }
-    }
 
     // TODO(mstarzinger): Introduce marker operands to indicate that this value
     // is not present and must be reconstructed from the deoptimizer. Currently
@@ -633,12 +612,6 @@
       translation->BeginArgumentsObject(arguments_count);
       for (int i = 0; i < arguments_count; ++i) {
         LOperand* value = environment->values()->at(translation_size + i);
-        ASSERT(environment->spilled_registers() == NULL ||
-               !value->IsRegister() ||
-               environment->spilled_registers()[value->index()] == NULL);
-        ASSERT(environment->spilled_registers() == NULL ||
-               !value->IsDoubleRegister() ||
-               environment->spilled_double_registers()[value->index()] == NULL);
         AddToTranslation(translation,
                          value,
                          environment->HasTaggedValueAt(translation_size + i),
@@ -706,6 +679,7 @@
                                RelocInfo::Mode mode,
                                LInstruction* instr,
                                SafepointMode safepoint_mode) {
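+  // Reserve space so this call site can be patched for lazy deoptimization.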
+  EnsureSpaceForLazyDeopt();
   ASSERT(instr != NULL);
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
@@ -1100,7 +1074,8 @@
 
 
 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
-  // Nothing to do.
+  // Record the address of the first unknown OSR value as the place to enter.
+  if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
 }
 
 
@@ -1136,9 +1111,37 @@
     __ And(result_reg, scratch, divisor - 1);
     __ bind(&done);
 
-  } else {
-    // TODO(svenpanne) Add right->has_fixed_right_arg() case.
+  } else if (hmod->fixed_right_arg().has_value) {
+    const Register scratch = scratch0();
+    const Register left_reg = ToRegister(instr->left());
+    const Register result_reg = ToRegister(instr->result());
 
+    Register right_reg = EmitLoadRegister(instr->right(), scratch);
+
+    int32_t divisor = hmod->fixed_right_arg().value;
+    ASSERT(IsPowerOf2(divisor));
+
+    // Check if our assumption of a fixed right operand still holds.
+    DeoptimizeIf(ne, instr->environment(), right_reg, Operand(divisor));
+
+    Label left_is_not_negative, done;
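+    // For a negative dividend, compute -(-left & (divisor - 1)) so the result
+    // keeps the sign of the dividend, as required for JS %.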
+    if (left->CanBeNegative()) {
+      __ Branch(USE_DELAY_SLOT, &left_is_not_negative,
+                ge, left_reg, Operand(zero_reg));
+      __ subu(result_reg, zero_reg, left_reg);
+      __ And(result_reg, result_reg, divisor - 1);
+      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+        DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
+      }
+      __ Branch(USE_DELAY_SLOT, &done);
+      __ subu(result_reg, zero_reg, result_reg);
+    }
+
+    __ bind(&left_is_not_negative);
+    __ And(result_reg, left_reg, divisor - 1);
+    __ bind(&done);
+
+  } else {
     const Register scratch = scratch0();
     const Register left_reg = ToRegister(instr->left());
     const Register result_reg = ToRegister(instr->result());
@@ -1706,9 +1709,11 @@
   Register map = ToRegister(instr->temp());
   Label done;
 
-  // If the object is a smi return the object.
-  __ Move(result, input);
-  __ JumpIfSmi(input, &done);
+  if (!instr->hydrogen()->value()->IsHeapObject()) {
+    // If the object is a smi return the object.
+    __ Move(result, input);
+    __ JumpIfSmi(input, &done);
+  }
 
   // If the object is not a value type, return the object.
   __ GetObjectType(input, map, map);
@@ -1970,12 +1975,13 @@
   return -1;
 }
 
-
-void LCodeGen::EmitBranch(int left_block, int right_block,
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr,
                           Condition cc, Register src1, const Operand& src2) {
+  int right_block = instr->FalseDestination(chunk_);
+  int left_block = instr->TrueDestination(chunk_);
+
   int next_block = GetNextEmittedBlock();
-  right_block = chunk_->LookupDestination(right_block);
-  left_block = chunk_->LookupDestination(left_block);
   if (right_block == left_block) {
     EmitGoto(left_block);
   } else if (left_block == next_block) {
@@ -1990,11 +1996,13 @@
 }
 
 
-void LCodeGen::EmitBranchF(int left_block, int right_block,
+template<class InstrType>
+void LCodeGen::EmitBranchF(InstrType instr,
                            Condition cc, FPURegister src1, FPURegister src2) {
+  int right_block = instr->FalseDestination(chunk_);
+  int left_block = instr->TrueDestination(chunk_);
+
   int next_block = GetNextEmittedBlock();
-  right_block = chunk_->LookupDestination(right_block);
-  left_block = chunk_->LookupDestination(left_block);
   if (right_block == left_block) {
     EmitGoto(left_block);
   } else if (left_block == next_block) {
@@ -2015,19 +2023,16 @@
 
 
 void LCodeGen::DoBranch(LBranch* instr) {
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
   Representation r = instr->hydrogen()->value()->representation();
   if (r.IsInteger32() || r.IsSmi()) {
     ASSERT(!info()->IsStub());
     Register reg = ToRegister(instr->value());
-    EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
+    EmitBranch(instr, ne, reg, Operand(zero_reg));
   } else if (r.IsDouble()) {
     ASSERT(!info()->IsStub());
     DoubleRegister reg = ToDoubleRegister(instr->value());
     // Test the double value. Zero and NaN are false.
-    EmitBranchF(true_block, false_block, nue, reg, kDoubleRegZero);
+    EmitBranchF(instr, nue, reg, kDoubleRegZero);
   } else {
     ASSERT(r.IsTagged());
     Register reg = ToRegister(instr->value());
@@ -2035,40 +2040,50 @@
     if (type.IsBoolean()) {
       ASSERT(!info()->IsStub());
       __ LoadRoot(at, Heap::kTrueValueRootIndex);
-      EmitBranch(true_block, false_block, eq, reg, Operand(at));
+      EmitBranch(instr, eq, reg, Operand(at));
     } else if (type.IsSmi()) {
       ASSERT(!info()->IsStub());
-      EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
+      EmitBranch(instr, ne, reg, Operand(zero_reg));
+    } else if (type.IsJSArray()) {
+      ASSERT(!info()->IsStub());
+      EmitBranch(instr, al, zero_reg, Operand(zero_reg));
+    } else if (type.IsHeapNumber()) {
+      ASSERT(!info()->IsStub());
+      DoubleRegister dbl_scratch = double_scratch0();
+      __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+      // Test the double value. Zero and NaN are false.
+      EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
+    } else if (type.IsString()) {
+      ASSERT(!info()->IsStub());
+      __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
+      EmitBranch(instr, ne, at, Operand(zero_reg));
     } else {
-      Label* true_label = chunk_->GetAssemblyLabel(true_block);
-      Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
       ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
       // Avoid deopts in the case where we've never executed this path before.
-      if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
+      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
 
       if (expected.Contains(ToBooleanStub::UNDEFINED)) {
         // undefined -> false.
         __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-        __ Branch(false_label, eq, reg, Operand(at));
+        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
       }
       if (expected.Contains(ToBooleanStub::BOOLEAN)) {
         // Boolean -> its value.
         __ LoadRoot(at, Heap::kTrueValueRootIndex);
-        __ Branch(true_label, eq, reg, Operand(at));
+        __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
         __ LoadRoot(at, Heap::kFalseValueRootIndex);
-        __ Branch(false_label, eq, reg, Operand(at));
+        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
       }
       if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
         // 'null' -> false.
         __ LoadRoot(at, Heap::kNullValueRootIndex);
-        __ Branch(false_label, eq, reg, Operand(at));
+        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
       }
 
       if (expected.Contains(ToBooleanStub::SMI)) {
         // Smis: 0 -> false, all other -> true.
-        __ Branch(false_label, eq, reg, Operand(zero_reg));
-        __ JumpIfSmi(reg, true_label);
+        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
+        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
       } else if (expected.NeedsMap()) {
         // If we need a map later and have a Smi -> deopt.
         __ And(at, reg, Operand(kSmiTagMask));
@@ -2082,14 +2097,15 @@
           // Undetectable -> false.
           __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
           __ And(at, at, Operand(1 << Map::kIsUndetectable));
-          __ Branch(false_label, ne, at, Operand(zero_reg));
+          __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
         }
       }
 
       if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
         // spec object -> true.
         __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
-        __ Branch(true_label, ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+        __ Branch(instr->TrueLabel(chunk_),
+                  ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
       }
 
       if (expected.Contains(ToBooleanStub::STRING)) {
@@ -2098,8 +2114,8 @@
         __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
         __ Branch(&not_string, ge , at, Operand(FIRST_NONSTRING_TYPE));
         __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
-        __ Branch(true_label, ne, at, Operand(zero_reg));
-        __ Branch(false_label);
+        __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
+        __ Branch(instr->FalseLabel(chunk_));
         __ bind(&not_string);
       }
 
@@ -2107,7 +2123,7 @@
         // Symbol value -> true.
         const Register scratch = scratch1();
         __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
-        __ Branch(true_label, eq, scratch, Operand(SYMBOL_TYPE));
+        __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
       }
 
       if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
@@ -2117,14 +2133,18 @@
         __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
         __ Branch(&not_heap_number, ne, map, Operand(at));
         __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
-        __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero);
+        __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+                   ne, dbl_scratch, kDoubleRegZero);
         // Falls through if dbl_scratch == 0.
-        __ Branch(false_label);
+        __ Branch(instr->FalseLabel(chunk_));
         __ bind(&not_heap_number);
       }
 
-      // We've seen something for the first time -> deopt.
-      DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+      if (!expected.IsGeneric()) {
+        // We've seen something for the first time -> deopt.
+        // This can only happen if we are not generic already.
+        DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+      }
     }
   }
 }
@@ -2132,7 +2152,7 @@
 
 void LCodeGen::EmitGoto(int block) {
   if (!IsNextEmittedBlock(block)) {
-    __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
+    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
   }
 }
 
@@ -2173,18 +2193,14 @@
 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
   LOperand* left = instr->left();
   LOperand* right = instr->right();
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-
   Condition cond = TokenToCondition(instr->op(), false);
 
   if (left->IsConstantOperand() && right->IsConstantOperand()) {
     // We can statically evaluate the comparison.
     double left_val = ToDouble(LConstantOperand::cast(left));
     double right_val = ToDouble(LConstantOperand::cast(right));
-    int next_block =
-      EvalComparison(instr->op(), left_val, right_val) ? true_block
-                                                       : false_block;
+    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
     EmitGoto(next_block);
   } else {
     if (instr->is_double()) {
@@ -2195,10 +2211,10 @@
 
       // If a NaN is involved, i.e. the result is unordered,
       // jump to false block label.
-      __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq,
+      __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
                  left_reg, right_reg);
 
-      EmitBranchF(true_block, false_block, cond, left_reg, right_reg);
+      EmitBranchF(instr, cond, left_reg, right_reg);
     } else {
       Register cmp_left;
       Operand cmp_right = Operand(0);
@@ -2228,7 +2244,7 @@
         cmp_right = Operand(ToRegister(right));
       }
 
-      EmitBranch(true_block, false_block, cond, cmp_left, cmp_right);
+      EmitBranch(instr, cond, cmp_left, cmp_right);
     }
   }
 }
@@ -2237,20 +2253,15 @@
 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
   Register left = ToRegister(instr->left());
   Register right = ToRegister(instr->right());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
 
-  EmitBranch(true_block, false_block, eq, left, Operand(right));
+  EmitBranch(instr, eq, left, Operand(right));
 }
 
 
 void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
   Register left = ToRegister(instr->left());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
 
-  EmitBranch(true_block, false_block, eq, left,
-             Operand(instr->hydrogen()->right()));
+  EmitBranch(instr, eq, left, Operand(instr->hydrogen()->right()));
 }
 
 
@@ -2285,23 +2296,22 @@
   Register temp1 = ToRegister(instr->temp());
   Register temp2 = scratch0();
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  Label* true_label = chunk_->GetAssemblyLabel(true_block);
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
   Condition true_cond =
-      EmitIsObject(reg, temp1, temp2, false_label, true_label);
+      EmitIsObject(reg, temp1, temp2,
+          instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
 
-  EmitBranch(true_block, false_block, true_cond, temp2,
+  EmitBranch(instr, true_cond, temp2,
              Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
 }
 
 
 Condition LCodeGen::EmitIsString(Register input,
                                  Register temp1,
-                                 Label* is_not_string) {
-  __ JumpIfSmi(input, is_not_string);
+                                 Label* is_not_string,
+                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
+  if (check_needed == INLINE_SMI_CHECK) {
+    __ JumpIfSmi(input, is_not_string);
+  }
   __ GetObjectType(input, temp1, temp1);
 
   return lt;
@@ -2312,25 +2322,21 @@
   Register reg = ToRegister(instr->value());
   Register temp1 = ToRegister(instr->temp());
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
+  SmiCheck check_needed =
+      instr->hydrogen()->value()->IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   Condition true_cond =
-      EmitIsString(reg, temp1, false_label);
+      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
 
-  EmitBranch(true_block, false_block, true_cond, temp1,
+  EmitBranch(instr, true_cond, temp1,
              Operand(FIRST_NONSTRING_TYPE));
 }
 
 
 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
   Register input_reg = EmitLoadRegister(instr->value(), at);
   __ And(at, input_reg, kSmiTagMask);
-  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
+  EmitBranch(instr, eq, at, Operand(zero_reg));
 }
 
 
@@ -2338,14 +2344,13 @@
   Register input = ToRegister(instr->value());
   Register temp = ToRegister(instr->temp());
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+  if (!instr->hydrogen()->value()->IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
   __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
   __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
   __ And(at, temp, Operand(1 << Map::kIsUndetectable));
-  EmitBranch(true_block, false_block, ne, at, Operand(zero_reg));
+  EmitBranch(instr, ne, at, Operand(zero_reg));
 }
 
 
@@ -2371,15 +2376,13 @@
 
 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
   Token::Value op = instr->op();
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
 
   Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   Condition condition = ComputeCompareCondition(op);
 
-  EmitBranch(true_block, false_block, condition, v0, Operand(zero_reg));
+  EmitBranch(instr, condition, v0, Operand(zero_reg));
 }
 
 
@@ -2407,16 +2410,12 @@
   Register scratch = scratch0();
   Register input = ToRegister(instr->value());
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
-  __ JumpIfSmi(input, false_label);
+  if (!instr->hydrogen()->value()->IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
 
   __ GetObjectType(input, scratch, scratch);
-  EmitBranch(true_block,
-             false_block,
+  EmitBranch(instr,
              BranchCondition(instr->hydrogen()),
              scratch,
              Operand(TestType(instr->hydrogen())));
@@ -2439,13 +2438,10 @@
   Register input = ToRegister(instr->value());
   Register scratch = scratch0();
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
   __ lw(scratch,
          FieldMemOperand(input, String::kHashFieldOffset));
   __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
-  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
+  EmitBranch(instr, eq, at, Operand(zero_reg));
 }
 
 
@@ -2521,26 +2517,19 @@
   Register temp2 = ToRegister(instr->temp());
   Handle<String> class_name = instr->hydrogen()->class_name();
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+                  class_name, input, temp, temp2);
 
-  Label* true_label = chunk_->GetAssemblyLabel(true_block);
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
-  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
-  EmitBranch(true_block, false_block, eq, temp, Operand(class_name));
+  EmitBranch(instr, eq, temp, Operand(class_name));
 }
 
 
 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
   Register reg = ToRegister(instr->value());
   Register temp = ToRegister(instr->temp());
-  int true_block = instr->true_block_id();
-  int false_block = instr->false_block_id();
 
   __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
-  EmitBranch(true_block, false_block, eq, temp, Operand(instr->map()));
+  EmitBranch(instr, eq, temp, Operand(instr->map()));
 }
 
 
@@ -2860,9 +2849,9 @@
 
   __ sw(value, target);
   if (instr->hydrogen()->NeedsWriteBarrier()) {
-    HType type = instr->hydrogen()->value()->type();
     SmiCheck check_needed =
-        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+        instr->hydrogen()->value()->IsHeapObject()
+            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     __ RecordWriteContextSlot(context,
                               target.offset(),
                               value,
@@ -3625,6 +3614,7 @@
   Label done;
   __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
   __ mov(result, input);
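+  // The mov above sits in the branch delay slot: result == input when the
+  // branch is taken; otherwise the subu below negates the value.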
+  __ subu(result, zero_reg, input);
   // Overflow if result is still negative, i.e. 0x80000000.
   DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
   __ bind(&done);
@@ -4044,12 +4034,9 @@
   ASSERT(ToRegister(instr->result()).is(v0));
 
   __ li(a0, Operand(instr->arity()));
-  if (FLAG_optimize_constructed_arrays) {
-    // No cell in a2 for construct type feedback in optimized code
-    Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
-                                   isolate());
-    __ li(a2, Operand(undefined_value));
-  }
+  // No cell in a2 for construct type feedback in optimized code.
+  Handle<Object> undefined_value(isolate()->factory()->undefined_value());
+  __ li(a2, Operand(undefined_value));
   CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
   CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
 }
@@ -4058,7 +4045,6 @@
 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   ASSERT(ToRegister(instr->constructor()).is(a1));
   ASSERT(ToRegister(instr->result()).is(v0));
-  ASSERT(FLAG_optimize_constructed_arrays);
 
   __ li(a0, Operand(instr->arity()));
   __ li(a2, Operand(instr->hydrogen()->property_cell()));
@@ -4153,9 +4139,9 @@
   // Do the store.
   Register value = ToRegister(instr->value());
   ASSERT(!object.is(value));
-  HType type = instr->hydrogen()->value()->type();
   SmiCheck check_needed =
-      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+      instr->hydrogen()->value()->IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   if (access.IsInobject()) {
     __ sw(value, FieldMemOperand(object, offset));
     if (instr->hydrogen()->NeedsWriteBarrier()) {
@@ -4380,9 +4366,9 @@
   __ sw(value, FieldMemOperand(store_base, offset));
 
   if (instr->hydrogen()->NeedsWriteBarrier()) {
-    HType type = instr->hydrogen()->value()->type();
     SmiCheck check_needed =
-       type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+        instr->hydrogen()->value()->IsHeapObject()
+            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
     __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
     __ RecordWrite(elements,
@@ -5133,9 +5119,11 @@
 
 
 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
-  LOperand* input = instr->value();
-  __ And(at, ToRegister(input), Operand(kSmiTagMask));
-  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+  if (!instr->hydrogen()->value()->IsHeapObject()) {
+    LOperand* input = instr->value();
+    __ And(at, ToRegister(input), Operand(kSmiTagMask));
+    DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+  }
 }
 
 
@@ -5293,6 +5281,80 @@
 }
 
 
+void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
+  class DeferredAllocateObject: public LDeferredCode {
+   public:
+    DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LAllocateObject* instr_;
+  };
+
+  DeferredAllocateObject* deferred =
+      new(zone()) DeferredAllocateObject(this, instr);
+
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->temp());
+  Register scratch2 = ToRegister(instr->temp2());
+  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+  Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
+  int instance_size = initial_map->instance_size();
+  ASSERT(initial_map->pre_allocated_property_fields() +
+         initial_map->unused_property_fields() -
+         initial_map->inobject_properties() == 0);
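+  // All in-object properties are covered by pre-allocated and unused fields,
+  // so the instance size is final.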
+
+  __ Allocate(instance_size, result, scratch, scratch2, deferred->entry(),
+              TAG_OBJECT);
+
+  __ bind(deferred->exit());
+  if (FLAG_debug_code) {
+    Label is_in_new_space;
+    __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
+    __ Abort("Allocated object is not in new-space");
+    __ bind(&is_in_new_space);
+  }
+
+  // Load the initial map.
+  Register map = scratch;
+  __ LoadHeapObject(map, constructor);
+  __ lw(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Initialize map and fields of the newly allocated object.
+  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
+  __ sw(map, FieldMemOperand(result, JSObject::kMapOffset));
+  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
+  __ sw(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
+  __ sw(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
+  if (initial_map->inobject_properties() != 0) {
+    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+    for (int i = 0; i < initial_map->inobject_properties(); i++) {
+      int property_offset = JSObject::kHeaderSize + i * kPointerSize;
+      __ sw(scratch, FieldMemOperand(result, property_offset));
+    }
+  }
+}
+
+
+void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
+  Register result = ToRegister(instr->result());
+  Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
+  int instance_size = initial_map->instance_size();
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, zero_reg);
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ li(a0, Operand(Smi::FromInt(instance_size)));
+  __ push(a0);
+  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
 void LCodeGen::DoAllocate(LAllocate* instr) {
   class DeferredAllocate: public LDeferredCode {
    public:
@@ -5460,16 +5522,12 @@
 
 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
   Register input = ToRegister(instr->value());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  Label* true_label = chunk_->GetAssemblyLabel(true_block);
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
   Register cmp1 = no_reg;
   Operand cmp2 = Operand(no_reg);
 
-  Condition final_branch_condition = EmitTypeofIs(true_label,
-                                                  false_label,
+  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
+                                                  instr->FalseLabel(chunk_),
                                                   input,
                                                   instr->type_literal(),
                                                   cmp1,
@@ -5479,7 +5537,7 @@
   ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
 
   if (final_branch_condition != kNoCondition) {
-    EmitBranch(true_block, false_block, final_branch_condition, cmp1, cmp2);
+    EmitBranch(instr, final_branch_condition, cmp1, cmp2);
   }
 }
 
@@ -5592,12 +5650,10 @@
 
 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
   Register temp1 = ToRegister(instr->temp());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
 
   EmitIsConstructCall(temp1, scratch0());
 
-  EmitBranch(true_block, false_block, eq, temp1,
+  EmitBranch(instr, eq, temp1,
              Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
 }
 
@@ -5748,15 +5804,15 @@
   // properly registered for deoptimization and records the assembler's PC
   // offset.
   LEnvironment* environment = instr->environment();
-  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
-                                   instr->SpilledDoubleRegisterArray());
 
   // If the environment were already registered, we would have no way of
   // backpatching it with the spill slot operands.
   ASSERT(!environment->HasBeenRegistered());
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-  ASSERT(osr_pc_offset_ == -1);
-  osr_pc_offset_ = masm()->pc_offset();
+
+  // Normally we record the first unknown OSR value as the entry point to the
+  // OSR code, but if there was none, record the entry point here.
+  if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
 }
 
 
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index 1c99cf9..3d31ef1 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -78,7 +78,6 @@
   Heap* heap() const { return isolate()->heap(); }
   Zone* zone() const { return zone_; }
 
-  // TODO(svenpanne) Use this consistently.
   int LookupDestination(int block_id) const {
     return chunk()->LookupDestination(block_id);
   }
@@ -149,6 +148,7 @@
   void DoDeferredRandom(LRandom* instr);
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredAllocateObject(LAllocateObject* instr);
   void DoDeferredAllocate(LAllocate* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
@@ -321,13 +321,13 @@
 
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
-  void EmitBranch(int left_block,
-                  int right_block,
+  template<class InstrType>
+  void EmitBranch(InstrType instr,
                   Condition cc,
                   Register src1,
                   const Operand& src2);
-  void EmitBranchF(int left_block,
-                   int right_block,
+  template<class InstrType>
+  void EmitBranchF(InstrType instr,
                    Condition cc,
                    FPURegister src1,
                    FPURegister src2);
@@ -365,7 +365,8 @@
   // true and false label should be made, to optimize fallthrough.
   Condition EmitIsString(Register input,
                          Register temp1,
-                         Label* is_not_string);
+                         Label* is_not_string,
+                         SmiCheck check_needed);
 
   // Emits optimized code for %_IsConstructCall().
   // Caller should branch on equal condition.
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index 1b94332..ba7fbd3 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -41,24 +41,6 @@
 LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
 #undef DEFINE_COMPILE
 
-LOsrEntry::LOsrEntry() {
-  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
-    register_spills_[i] = NULL;
-  }
-  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
-    double_register_spills_[i] = NULL;
-  }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
-                                    LOperand* spill_operand) {
-  ASSERT(spill_operand->IsStackSlot());
-  ASSERT(register_spills_[allocation_index] == NULL);
-  register_spills_[allocation_index] = spill_operand;
-}
-
-
 #ifdef DEBUG
 void LInstruction::VerifyCall() {
   // Call instructions can use only fixed registers as temporaries and
@@ -81,14 +63,6 @@
 #endif
 
 
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
-                                          LOperand* spill_operand) {
-  ASSERT(spill_operand->IsDoubleStackSlot());
-  ASSERT(double_register_spills_[allocation_index] == NULL);
-  double_register_spills_[allocation_index] = spill_operand;
-}
-
-
 void LInstruction::PrintTo(StringStream* stream) {
   stream->Add("%s ", this->Mnemonic());
 
@@ -455,7 +429,7 @@
 LPlatformChunk* LChunkBuilder::Build() {
   ASSERT(is_unused());
   chunk_ = new(zone()) LPlatformChunk(info(), graph());
-  HPhase phase("L_Building chunk", chunk_);
+  LPhase phase("L_Building chunk", chunk_);
   status_ = BUILDING;
   const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
   for (int i = 0; i < blocks->length(); i++) {
@@ -1003,10 +977,13 @@
 
   LBranch* result = new(zone()) LBranch(UseRegister(value));
   // Tagged values that are not known smis or booleans require a
-  // deoptimization environment.
+  // deoptimization environment. If the instruction is generic, no
+  // environment is needed since all cases are handled.
   Representation rep = value->representation();
   HType type = value->type();
-  if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
+  ToBooleanStub::Types expected = instr->expected_input_types();
+  if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() &&
+      !expected.IsGeneric()) {
     return AssignEnvironment(result);
   }
   return result;
@@ -1437,6 +1414,10 @@
               instr->CheckFlag(HValue::kBailoutOnMinusZero))
           ? AssignEnvironment(result)
           : result;
+    } else if (instr->fixed_right_arg().has_value) {
+      LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
+                                     UseRegisterAtStart(right));
+      return AssignEnvironment(DefineAsRegister(mod));
     } else {
       LModI* mod = new(zone()) LModI(UseRegister(left),
                                      UseRegister(right),
@@ -1939,7 +1920,7 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
   return AssignEnvironment(new(zone()) LCheckNonSmi(value));
 }
@@ -2338,6 +2319,14 @@
 }
 
 
+LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
+  info()->MarkAsDeferredCalling();
+  LAllocateObject* result =
+      new(zone()) LAllocateObject(TempRegister(), TempRegister());
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
 LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
   info()->MarkAsDeferredCalling();
   LOperand* size = instr->size()->IsConstant()
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index 32db0dd..3e94301 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -49,6 +49,7 @@
 #define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
   V(AccessArgumentsAt)                          \
   V(AddI)                                       \
+  V(AllocateObject)                             \
   V(Allocate)                                   \
   V(ApplyArguments)                             \
   V(ArgumentsElements)                          \
@@ -487,17 +488,44 @@
 template<int I, int T>
 class LControlInstruction: public LTemplateInstruction<0, I, T> {
  public:
+  LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
   virtual bool IsControl() const { return true; }
 
   int SuccessorCount() { return hydrogen()->SuccessorCount(); }
   HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
-  int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
-  int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
+
+  int TrueDestination(LChunk* chunk) {
+    return chunk->LookupDestination(true_block_id());
+  }
+  int FalseDestination(LChunk* chunk) {
+    return chunk->LookupDestination(false_block_id());
+  }
+
+  Label* TrueLabel(LChunk* chunk) {
+    if (true_label_ == NULL) {
+      true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+    }
+    return true_label_;
+  }
+  Label* FalseLabel(LChunk* chunk) {
+    if (false_label_ == NULL) {
+      false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+    }
+    return false_label_;
+  }
+
+ protected:
+  int true_block_id() { return SuccessorAt(0)->block_id(); }
+  int false_block_id() { return SuccessorAt(1)->block_id(); }
 
  private:
   HControlInstruction* hydrogen() {
     return HControlInstruction::cast(this->hydrogen_value());
   }
+
+  Label* false_label_;
+  Label* true_label_;
 };
 
 
@@ -1215,7 +1243,7 @@
 };
 
 
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
+class LCmpMapAndBranch: public LControlInstruction<1, 1> {
  public:
   LCmpMapAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1228,15 +1256,7 @@
   DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(CompareMap)
 
-  virtual bool IsControl() const { return true; }
-
   Handle<Map> map() const { return hydrogen()->map(); }
-  int true_block_id() const {
-    return hydrogen()->FirstSuccessor()->block_id();
-  }
-  int false_block_id() const {
-    return hydrogen()->SecondSuccessor()->block_id();
-  }
 };
 
 
@@ -2374,6 +2394,7 @@
   LOperand* value() { return inputs_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+  DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
 };
 
 
@@ -2417,6 +2438,21 @@
 };
 
 
+class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
+ public:
+  LAllocateObject(LOperand* temp, LOperand* temp2) {
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
+  DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
+};
+
+
 class LAllocate: public LTemplateInstruction<1, 2, 2> {
  public:
   LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
@@ -2519,26 +2555,10 @@
 
 class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
  public:
-  LOsrEntry();
+  LOsrEntry() {}
 
   virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
   DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
-  LOperand** SpilledRegisterArray() { return register_spills_; }
-  LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
-  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
-  void MarkSpilledDoubleRegister(int allocation_index,
-                                 LOperand* spill_operand);
-
- private:
-  // Arrays of spill slot operands for registers with an assigned spill
-  // slot, i.e., that must also be restored to the spill slot on OSR entry.
-  // NULL if the register has no assigned spill slot.  Indexed by allocation
-  // index.
-  LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
-  LOperand* double_register_spills_[
-      DoubleRegister::kMaxNumAllocatableRegisters];
 };
 
 
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index d41ddd2..3e3314a 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -3202,6 +3202,14 @@
 }
 
 
+void MacroAssembler::JumpIfNotUniqueName(Register reg,
+                                         Label* not_unique_name) {
+  STATIC_ASSERT(((SYMBOL_TYPE - 1) & kIsInternalizedMask) == kInternalizedTag);
+  Branch(not_unique_name, lt, reg, Operand(kIsInternalizedMask));
+  Branch(not_unique_name, gt, reg, Operand(SYMBOL_TYPE));
+}
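The two conditional branches implement a single range check: an instance type is a unique name exactly when kIsInternalizedMask <= type <= SYMBOL_TYPE, a property the STATIC_ASSERT ties to the type-code layout (every code in that range is an internalized string or the symbol type). An equivalent C++ predicate, with an illustrative SYMBOL_TYPE value assumed only so the asserts can run:

    #include <cassert>
    #include <cstdint>

    const uint32_t kIsInternalizedMask = 0x40;
    const uint32_t kSymbolType = 0x80;  // illustrative value, not the real enum

    // Mirrors JumpIfNotUniqueName: fall through (unique) only when neither
    // the lt-branch nor the gt-branch is taken.
    bool IsUniqueName(uint32_t instance_type) {
      return instance_type >= kIsInternalizedMask &&  // lt-branch not taken
             instance_type <= kSymbolType;            // gt-branch not taken
    }

    int main() {
      assert(IsUniqueName(0x40));         // an internalized string type
      assert(IsUniqueName(kSymbolType));  // the symbol type itself
      assert(!IsUniqueName(0x00));        // plain, non-internalized string
    }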
+
+
 // Allocates a heap number or jumps to the label if the young space is full and
 // a scavenge is needed.
 void MacroAssembler::AllocateHeapNumber(Register result,
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index c983b8b..ffae2fd 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -1406,6 +1406,8 @@
                                               Register scratch,
                                               Label* failure);
 
+  void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
+
   // Test that both first and second are sequential ASCII strings.
   // Assume that they are non-smis.
   void JumpIfNonSmisNotBothSequentialAsciiStrings(Register first,
diff --git a/src/mips/simulator-mips.h b/src/mips/simulator-mips.h
index a091e5f..601cd6d 100644
--- a/src/mips/simulator-mips.h
+++ b/src/mips/simulator-mips.h
@@ -316,8 +316,6 @@
     if (instr->InstructionBits() == nopInstr) {
       // Short-cut generic nop instructions. They are always valid and they
       // never change the simulator state.
-      set_register(pc, reinterpret_cast<int32_t>(instr) +
-                       Instruction::kInstrSize);
       return;
     }
 
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 51df452..caddb76 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -1714,12 +1714,61 @@
 }
 
 
+Handle<Code> CallStubCompiler::CompileArrayCodeCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<Cell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name,
+    Code::StubType type) {
+  Label miss;
+
+  // Check that the function is still the Array function.
+  const int argc = arguments().immediate();
+  GenerateNameCheck(name, &miss);
+  Register receiver = a1;
+
+  if (cell.is_null()) {
+    __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+
+    // Check that the receiver isn't a smi.
+    __ JumpIfSmi(receiver, &miss);
+
+    // Check that the maps haven't changed.
+    CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, a3, a0,
+                    t0, name, &miss);
+  } else {
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
+    GenerateLoadFunctionFromCell(cell, function, &miss);
+  }
+
+  Handle<Smi> kind(Smi::FromInt(GetInitialFastElementsKind()), isolate());
+  Handle<Cell> kind_feedback_cell =
+      isolate()->factory()->NewCell(kind);
+  __ li(a0, Operand(argc));
+  __ li(a2, Operand(kind_feedback_cell));
+  __ li(a1, Operand(function));
+
+  ArrayConstructorStub stub(isolate());
+  __ TailCallStub(&stub);
+
+  __ bind(&miss);
+  GenerateMissBranch();
+
+  // Return the generated code.
+  return GetCode(type, name);
+}
+
+
 Handle<Code> CallStubCompiler::CompileArrayPushCall(
     Handle<Object> object,
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -1964,7 +2013,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return GetCode(function);
+  return GetCode(type, name);
 }
 
 
@@ -1973,7 +2022,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -2046,7 +2096,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return GetCode(function);
+  return GetCode(type, name);
 }
 
 
@@ -2055,7 +2105,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -2128,7 +2179,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return GetCode(function);
+  return GetCode(type, name);
 }
 
 
@@ -2137,7 +2188,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -2209,7 +2261,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return GetCode(function);
+  return GetCode(type, name);
 }
 
 
@@ -2218,7 +2270,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -2282,7 +2335,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+  return GetCode(type, name);
 }
 
 
@@ -2291,7 +2344,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -2411,7 +2465,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+  return GetCode(type, name);
 }
 
 
@@ -2420,7 +2474,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -2510,7 +2565,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+  return GetCode(type, name);
 }
 
 
@@ -2692,7 +2747,8 @@
   if (HasCustomCallGenerator(function)) {
     Handle<Code> code = CompileCustomCall(object, holder,
                                           Handle<Cell>::null(),
-                                          function, Handle<String>::cast(name));
+                                          function, Handle<String>::cast(name),
+                                          Code::CONSTANT_FUNCTION);
     // A null handle means bail out to the regular compiler code below.
     if (!code.is_null()) return code;
   }
@@ -2761,7 +2817,8 @@
 
   if (HasCustomCallGenerator(function)) {
     Handle<Code> code = CompileCustomCall(
-        object, holder, cell, function, Handle<String>::cast(name));
+        object, holder, cell, function, Handle<String>::cast(name),
+        Code::NORMAL);
     // A null handle means bail out to the regular compiler code below.
     if (!code.is_null()) return code;
   }
diff --git a/src/mirror-debugger.js b/src/mirror-debugger.js
index e1fd872..28b8fc8 100644
--- a/src/mirror-debugger.js
+++ b/src/mirror-debugger.js
@@ -1509,6 +1509,11 @@
 };
 
 
+FrameDetails.prototype.stepInPositionsImpl = function() {
+  return %GetStepInPositions(this.break_id_, this.frameId());
+};
+
+
 /**
  * Mirror object for stack frames.
  * @param {number} break_id The break id in the VM for which this frame is
@@ -1669,15 +1674,55 @@
 };
 
 
+FrameMirror.prototype.stepInPositions = function() {
+  var script = this.func().script();
+  var funcOffset = this.func().sourcePosition_();
+
+  var stepInRaw = this.details_.stepInPositionsImpl();
+  var result = [];
+  if (stepInRaw) {
+    for (var i = 0; i < stepInRaw.length; i++) {
+      var posStruct = {};
+      var offset = script.locationFromPosition(funcOffset + stepInRaw[i],
+                                               true);
+      serializeLocationFields(offset, posStruct);
+      var item = {
+        position: posStruct
+      };
+      result.push(item);
+    }
+  }
+
+  return result;
+};
+
+
 FrameMirror.prototype.evaluate = function(source, disable_break,
                                           opt_context_object) {
-  var result = %DebugEvaluate(this.break_id_,
-                              this.details_.frameId(),
-                              this.details_.inlinedFrameIndex(),
-                              source,
-                              Boolean(disable_break),
-                              opt_context_object);
-  return MakeMirror(result);
+  var result_array = %DebugEvaluate(this.break_id_,
+                                    this.details_.frameId(),
+                                    this.details_.inlinedFrameIndex(),
+                                    source,
+                                    Boolean(disable_break),
+                                    opt_context_object);
+  // Silently ignore changes to local variables if the frame is optimized.
+  if (!this.isOptimizedFrame()) {
+    var local_scope_on_stack = result_array[1];
+    var local_scope_modified = result_array[2];
+    for (var n in local_scope_modified) {
+      var value_on_stack = local_scope_on_stack[n];
+      var value_modified = local_scope_modified[n];
+      if (value_on_stack !== value_modified) {
+        %SetScopeVariableValue(this.break_id_,
+                               this.details_.frameId(),
+                               this.details_.inlinedFrameIndex(),
+                               0,
+                               n,
+                               value_modified);
+      }
+    }
+  }
+  return MakeMirror(result_array[0]);
 };
 
 
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index ca42383..c0c0e47 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -207,6 +207,9 @@
     case JS_TYPED_ARRAY_TYPE:
       JSTypedArray::cast(this)->JSTypedArrayVerify();
       break;
+    case JS_DATA_VIEW_TYPE:
+      JSDataView::cast(this)->JSDataViewVerify();
+      break;
 
 #define MAKE_STRUCT_CASE(NAME, Name, name) \
   case NAME##_TYPE:                        \
@@ -761,8 +764,8 @@
 }
 
 
-void JSTypedArray::JSTypedArrayVerify() {
-  CHECK(IsJSTypedArray());
+void JSArrayBufferView::JSArrayBufferViewVerify() {
+  CHECK(IsJSArrayBufferView());
   JSObjectVerify();
   VerifyPointer(buffer());
   CHECK(buffer()->IsJSArrayBuffer() || buffer()->IsUndefined());
@@ -774,7 +777,12 @@
   VerifyPointer(byte_length());
   CHECK(byte_length()->IsSmi() || byte_length()->IsHeapNumber()
         || byte_length()->IsUndefined());
+}
 
+
+void JSTypedArray::JSTypedArrayVerify() {
+  CHECK(IsJSTypedArray());
+  JSArrayBufferViewVerify();
   VerifyPointer(length());
   CHECK(length()->IsSmi() || length()->IsHeapNumber()
         || length()->IsUndefined());
@@ -783,6 +791,12 @@
 }
 
 
+void JSDataView::JSDataViewVerify() {
+  CHECK(IsJSDataView());
+  JSArrayBufferViewVerify();
+}
+
+
 void Foreign::ForeignVerify() {
   CHECK(IsForeign());
 }
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 5d4d9d3..ea347fd 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -221,12 +221,9 @@
 bool Object::IsInternalizedString() {
   if (!this->IsHeapObject()) return false;
   uint32_t type = HeapObject::cast(this)->map()->instance_type();
-  // Because the internalized tag is non-zero and no non-string types have the
-  // internalized bit set we can test for internalized strings with a very
-  // simple test operation.
   STATIC_ASSERT(kInternalizedTag != 0);
-  ASSERT(kNotStringTag + kIsInternalizedMask > LAST_TYPE);
-  return (type & kIsInternalizedMask) != 0;
+  return (type & (kIsNotStringMask | kIsInternalizedMask)) ==
+      (kInternalizedTag | kStringTag);
 }
 
 
@@ -323,7 +320,8 @@
 bool StringShape::IsInternalized() {
   ASSERT(valid());
   STATIC_ASSERT(kInternalizedTag != 0);
-  return (type_ & kIsInternalizedMask) != 0;
+  return (type_ & (kIsNotStringMask | kIsInternalizedMask)) ==
+      (kInternalizedTag | kStringTag);
 }
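The old test relied on no non-string type having the internalized bit set; the new form checks both facts in one masked compare: the value is a string (bit 7 clear) and it is internalized (bit 6 set). A worked check of the expression, assuming kIsNotStringMask == 0x80 and kStringTag == 0x0 to match the kNotStringTag/kInternalizedTag declarations in the objects.h hunk later in this patch:

    #include <cassert>
    #include <cstdint>

    const uint32_t kIsNotStringMask = 0x80;     // assumed, cf. kNotStringTag
    const uint32_t kStringTag = 0x0;            // assumed
    const uint32_t kIsInternalizedMask = 0x40;
    const uint32_t kInternalizedTag = 0x40;

    // One masked compare checks both bits at once: bit 7 clear (a string)
    // and bit 6 set (internalized).
    bool IsInternalizedType(uint32_t type) {
      return (type & (kIsNotStringMask | kIsInternalizedMask)) ==
             (kInternalizedTag | kStringTag);
    }

    int main() {
      assert(IsInternalizedType(0x40));   // internalized string
      assert(!IsInternalizedType(0x00));  // plain string, not internalized
      assert(!IsInternalizedType(0xc0));  // non-string with bit 6 set
    }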
 
 
@@ -695,6 +693,14 @@
 TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
 TYPE_CHECKER(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE)
 TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
+TYPE_CHECKER(JSDataView, JS_DATA_VIEW_TYPE)
+
+
+bool Object::IsJSArrayBufferView() {
+  return IsJSDataView() || IsJSTypedArray();
+}
+
+
 TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
 
 
@@ -1633,6 +1639,7 @@
   WRITE_FIELD(this, kValueOffset, val);
 }
 
+ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)
 
 Object* PropertyCell::type_raw() {
   return READ_FIELD(this, kTypeOffset);
@@ -1673,6 +1680,8 @@
       return JSArrayBuffer::kSize;
     case JS_TYPED_ARRAY_TYPE:
       return JSTypedArray::kSize;
+    case JS_DATA_VIEW_TYPE:
+      return JSDataView::kSize;
     case JS_SET_TYPE:
       return JSSet::kSize;
     case JS_MAP_TYPE:
@@ -2551,7 +2560,9 @@
 CAST_ACCESSOR(Code)
 CAST_ACCESSOR(JSArray)
 CAST_ACCESSOR(JSArrayBuffer)
+CAST_ACCESSOR(JSArrayBufferView)
 CAST_ACCESSOR(JSTypedArray)
+CAST_ACCESSOR(JSDataView)
 CAST_ACCESSOR(JSRegExp)
 CAST_ACCESSOR(JSProxy)
 CAST_ACCESSOR(JSFunctionProxy)
@@ -3376,15 +3387,13 @@
 int HeapObject::SizeFromMap(Map* map) {
   int instance_size = map->instance_size();
   if (instance_size != kVariableSizeSentinel) return instance_size;
-  // We can ignore the "internalized" bit because it is only set for strings
-  // and thus implies a string type.
-  int instance_type =
-      static_cast<int>(map->instance_type()) & ~kIsInternalizedMask;
   // Only inline the most frequent cases.
+  int instance_type = static_cast<int>(map->instance_type());
   if (instance_type == FIXED_ARRAY_TYPE) {
     return FixedArray::BodyDescriptor::SizeOf(map, this);
   }
-  if (instance_type == ASCII_STRING_TYPE) {
+  if (instance_type == ASCII_STRING_TYPE ||
+      instance_type == ASCII_INTERNALIZED_STRING_TYPE) {
     return SeqOneByteString::SizeFor(
         reinterpret_cast<SeqOneByteString*>(this)->length());
   }
@@ -3394,7 +3403,8 @@
   if (instance_type == FREE_SPACE_TYPE) {
     return reinterpret_cast<FreeSpace*>(this)->size();
   }
-  if (instance_type == STRING_TYPE) {
+  if (instance_type == STRING_TYPE ||
+      instance_type == INTERNALIZED_STRING_TYPE) {
     return SeqTwoByteString::SizeFor(
         reinterpret_cast<SeqTwoByteString*>(this)->length());
   }
@@ -4029,7 +4039,12 @@
 
 bool Code::is_inline_cache_stub() {
   Kind kind = this->kind();
-  return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND;
+  switch (kind) {
+#define CASE(name) case name: return true;
+    IC_KIND_LIST(CASE)
+#undef CASE
+    default: return false;
+  }
 }
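This rewrite, like the Kind2String rewrite in objects.cc later in the patch, replaces a hand-maintained range check or case list with an expansion of the IC_KIND_LIST/CODE_KIND_LIST X-macros, so adding a kind updates every switch automatically. A self-contained sketch of the pattern with a hypothetical COLOR_LIST:

    #include <cstdio>

    // Hypothetical list macro in the style of IC_KIND_LIST/CODE_KIND_LIST.
    #define COLOR_LIST(V) \
      V(RED)              \
      V(GREEN)            \
      V(BLUE)

    enum Color {
    #define DEFINE_ENUM(name) name,
      COLOR_LIST(DEFINE_ENUM)
    #undef DEFINE_ENUM
      NUMBER_OF_COLORS
    };

    // Each expansion site derives its cases from the same list, so a color
    // added to COLOR_LIST can never be forgotten in a switch.
    const char* ColorToString(Color c) {
      switch (c) {
    #define CASE(name) case name: return #name;
        COLOR_LIST(CASE)
    #undef CASE
        case NUMBER_OF_COLORS: break;
      }
      return nullptr;
    }

    int main() { std::printf("%s\n", ColorToString(GREEN)); }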
 
 
@@ -4434,7 +4449,7 @@
 
 ACCESSORS(Script, source, Object, kSourceOffset)
 ACCESSORS(Script, name, Object, kNameOffset)
-ACCESSORS(Script, id, Object, kIdOffset)
+ACCESSORS(Script, id, Smi, kIdOffset)
 ACCESSORS_TO_SMI(Script, line_offset, kLineOffsetOffset)
 ACCESSORS_TO_SMI(Script, column_offset, kColumnOffsetOffset)
 ACCESSORS(Script, data, Object, kDataOffset)
@@ -4781,17 +4796,6 @@
 }
 
 
-int SharedFunctionInfo::code_age() {
-  return (compiler_hints() >> kCodeAgeShift) & kCodeAgeMask;
-}
-
-
-void SharedFunctionInfo::set_code_age(int code_age) {
-  int hints = compiler_hints() & ~(kCodeAgeMask << kCodeAgeShift);
-  set_compiler_hints(hints | ((code_age & kCodeAgeMask) << kCodeAgeShift));
-}
-
-
 int SharedFunctionInfo::ic_age() {
   return ICAgeBits::decode(counters());
 }
@@ -4895,12 +4899,7 @@
 
 
 Code* JSFunction::code() {
-  return Code::cast(unchecked_code());
-}
-
-
-Code* JSFunction::unchecked_code() {
-  return reinterpret_cast<Code*>(
+  return Code::cast(
       Code::GetObjectFromEntryAddress(FIELD_ADDR(this, kCodeEntryOffset)));
 }
 
@@ -4945,11 +4944,6 @@
 }
 
 
-Object* JSFunction::unchecked_context() {
-  return READ_FIELD(this, kContextOffset);
-}
-
-
 void JSFunction::set_context(Object* value) {
   ASSERT(value->IsUndefined() || value->IsContext());
   WRITE_FIELD(this, kContextOffset, value);
@@ -5309,14 +5303,14 @@
 
 
 ACCESSORS(JSArrayBuffer, weak_next, Object, kWeakNextOffset)
-ACCESSORS(JSArrayBuffer, weak_first_array, Object, kWeakFirstArrayOffset)
+ACCESSORS(JSArrayBuffer, weak_first_view, Object, kWeakFirstViewOffset)
 
 
-ACCESSORS(JSTypedArray, buffer, Object, kBufferOffset)
-ACCESSORS(JSTypedArray, byte_offset, Object, kByteOffsetOffset)
-ACCESSORS(JSTypedArray, byte_length, Object, kByteLengthOffset)
+ACCESSORS(JSArrayBufferView, buffer, Object, kBufferOffset)
+ACCESSORS(JSArrayBufferView, byte_offset, Object, kByteOffsetOffset)
+ACCESSORS(JSArrayBufferView, byte_length, Object, kByteLengthOffset)
+ACCESSORS(JSArrayBufferView, weak_next, Object, kWeakNextOffset)
 ACCESSORS(JSTypedArray, length, Object, kLengthOffset)
-ACCESSORS(JSTypedArray, weak_next, Object, kWeakNextOffset)
 
 ACCESSORS(JSRegExp, data, Object, kDataOffset)
 
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index a57088b..f1616da 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -194,6 +194,9 @@
     case JS_TYPED_ARRAY_TYPE:
       JSTypedArray::cast(this)->JSTypedArrayPrint(out);
       break;
+    case JS_DATA_VIEW_TYPE:
+      JSDataView::cast(this)->JSDataViewPrint(out);
+      break;
 #define MAKE_STRUCT_CASE(NAME, Name, name) \
   case NAME##_TYPE:                        \
     Name::cast(this)->Name##Print(out);    \
@@ -551,8 +554,9 @@
     case JS_GLOBAL_OBJECT_TYPE: return "JS_GLOBAL_OBJECT";
     case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT";
     case JS_GLOBAL_PROXY_TYPE: return "JS_GLOBAL_PROXY";
-    case JS_TYPED_ARRAY_TYPE: return "JS_TYPED_ARRAY";
     case JS_ARRAY_BUFFER_TYPE: return "JS_ARRAY_BUFFER";
+    case JS_TYPED_ARRAY_TYPE: return "JS_TYPED_ARRAY";
+    case JS_DATA_VIEW_TYPE: return "JS_DATA_VIEW";
     case FOREIGN_TYPE: return "FOREIGN";
     case JS_MESSAGE_OBJECT_TYPE: return "JS_MESSAGE_OBJECT_TYPE";
 #define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME;
@@ -821,7 +825,7 @@
 
 void JSTypedArray::JSTypedArrayPrint(FILE* out) {
   HeapObject::PrintHeader(out, "JSTypedArray");
-  PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+  PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
   PrintF(out, " - buffer =");
   buffer()->ShortPrint(out);
   PrintF(out, "\n - byte_offset = ");
@@ -835,6 +839,19 @@
 }
 
 
+void JSDataView::JSDataViewPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "JSDataView");
+  PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
+  PrintF(out, " - buffer =");
+  buffer()->ShortPrint(out);
+  PrintF(out, "\n - byte_offset = ");
+  byte_offset()->ShortPrint(out);
+  PrintF(out, "\n - byte_length = ");
+  byte_length()->ShortPrint(out);
+  PrintF(out, "\n");
+}
+
+
 void JSFunction::JSFunctionPrint(FILE* out) {
   HeapObject::PrintHeader(out, "Function");
   PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
@@ -847,7 +864,7 @@
   PrintF(out, "\n   - name = ");
   shared()->name()->Print(out);
   PrintF(out, "\n - context = ");
-  unchecked_context()->ShortPrint(out);
+  context()->ShortPrint(out);
   PrintF(out, "\n - literals = ");
   literals()->ShortPrint(out);
   PrintF(out, "\n - code = ");
diff --git a/src/objects-visiting-inl.h b/src/objects-visiting-inl.h
index 9dc245c..cfb7d44 100644
--- a/src/objects-visiting-inl.h
+++ b/src/objects-visiting-inl.h
@@ -83,6 +83,8 @@
 
   table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
 
+  table_.Register(kVisitJSDataView, &VisitJSDataView);
+
   table_.Register(kVisitFreeSpace, &VisitFreeSpace);
 
   table_.Register(kVisitJSWeakMap, &JSObjectVisitor::Visit);
@@ -108,7 +110,7 @@
   Heap* heap = map->GetHeap();
 
   STATIC_ASSERT(
-      JSArrayBuffer::kWeakFirstArrayOffset ==
+      JSArrayBuffer::kWeakFirstViewOffset ==
       JSArrayBuffer::kWeakNextOffset + kPointerSize);
   VisitPointers(
       heap,
@@ -140,6 +142,22 @@
 
 
 template<typename StaticVisitor>
+int StaticNewSpaceVisitor<StaticVisitor>::VisitJSDataView(
+    Map* map, HeapObject* object) {
+  VisitPointers(
+      map->GetHeap(),
+      HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset),
+      HeapObject::RawField(object, JSDataView::kWeakNextOffset));
+  VisitPointers(
+      map->GetHeap(),
+      HeapObject::RawField(object,
+          JSDataView::kWeakNextOffset + kPointerSize),
+      HeapObject::RawField(object, JSDataView::kSize));
+  return JSDataView::kSize;
+}
+
+
+template<typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::Initialize() {
   table_.Register(kVisitShortcutCandidate,
                   &FixedBodyVisitor<StaticVisitor,
@@ -194,6 +212,8 @@
 
   table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
 
+  table_.Register(kVisitJSDataView, &VisitJSDataView);
+
   // Registration for kVisitJSRegExp is done by StaticVisitor.
 
   table_.Register(kVisitCell,
@@ -201,10 +221,7 @@
                   Cell::BodyDescriptor,
                   void>::Visit);
 
-  table_.Register(kVisitPropertyCell,
-                  &FixedBodyVisitor<StaticVisitor,
-                  PropertyCell::BodyDescriptor,
-                  void>::Visit);
+  table_.Register(kVisitPropertyCell, &VisitPropertyCell);
 
   table_.template RegisterSpecializations<DataObjectVisitor,
                                           kVisitDataObject,
@@ -339,6 +356,30 @@
 
 
 template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitPropertyCell(
+    Map* map, HeapObject* object) {
+  Heap* heap = map->GetHeap();
+
+  Object** slot =
+      HeapObject::RawField(object, PropertyCell::kDependentCodeOffset);
+  if (FLAG_collect_maps) {
+    // Mark property cell dependent codes array but do not push it onto marking
+    // stack, this will make references from it weak. We will clean dead
+    // codes when we iterate over property cells in ClearNonLiveReferences.
+    HeapObject* obj = HeapObject::cast(*slot);
+    heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+    StaticVisitor::MarkObjectWithoutPush(heap, obj);
+  } else {
+    StaticVisitor::VisitPointer(heap, slot);
+  }
+
+  StaticVisitor::VisitPointers(heap,
+      HeapObject::RawField(object, PropertyCell::kPointerFieldsBeginOffset),
+      HeapObject::RawField(object, PropertyCell::kPointerFieldsEndOffset));
+}
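Marking the dependent-code array without pushing it onto the marking stack means its contents are never traced through, which is what makes those references weak: code objects reachable only from the array stay unmarked and are later dropped in ClearNonLiveReferences. A toy model of the difference, with simplified types assumed here:

    #include <cassert>
    #include <vector>

    struct Obj {
      bool marked = false;
      std::vector<Obj*> refs;
    };

    // Normal (strong) marking: mark and queue the object for tracing.
    void Mark(Obj* o, std::vector<Obj*>* stack) {
      if (!o->marked) { o->marked = true; stack->push_back(o); }
    }

    // Weak variant: mark the object itself but never trace its references.
    void MarkWithoutPush(Obj* o) { o->marked = true; }

    void Drain(std::vector<Obj*>* stack) {
      while (!stack->empty()) {
        Obj* o = stack->back();
        stack->pop_back();
        for (Obj* r : o->refs) Mark(r, stack);
      }
    }

    int main() {
      Obj dependent_code, dead_code;
      dependent_code.refs.push_back(&dead_code);
      std::vector<Obj*> stack;
      MarkWithoutPush(&dependent_code);  // the kVisitPropertyCell case
      Drain(&stack);
      assert(dependent_code.marked);  // the array itself survives
      assert(!dead_code.marked);      // its entries are left for the sweeper
    }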
+
+
+template<typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::VisitCode(
     Map* map, HeapObject* object) {
   Heap* heap = map->GetHeap();
@@ -456,7 +497,7 @@
   Heap* heap = map->GetHeap();
 
   STATIC_ASSERT(
-      JSArrayBuffer::kWeakFirstArrayOffset ==
+      JSArrayBuffer::kWeakFirstViewOffset ==
       JSArrayBuffer::kWeakNextOffset + kPointerSize);
   StaticVisitor::VisitPointers(
       heap,
@@ -486,6 +527,21 @@
 
 
 template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSDataView(
+    Map* map, HeapObject* object) {
+  StaticVisitor::VisitPointers(
+      map->GetHeap(),
+      HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset),
+      HeapObject::RawField(object, JSDataView::kWeakNextOffset));
+  StaticVisitor::VisitPointers(
+      map->GetHeap(),
+      HeapObject::RawField(object,
+        JSDataView::kWeakNextOffset + kPointerSize),
+      HeapObject::RawField(object, JSDataView::kSize));
+}
+
+
+template<typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(
     Heap* heap, Map* map) {
   // Make sure that the back pointer stored either in the map itself or
@@ -606,16 +662,11 @@
   // by optimized version of function.
   MarkBit code_mark = Marking::MarkBitFrom(function->code());
   if (code_mark.Get()) {
-    if (!FLAG_age_code) {
-      if (!Marking::MarkBitFrom(shared_info).Get()) {
-        shared_info->set_code_age(0);
-      }
-    }
     return false;
   }
 
   // The function must have a valid context and not be a builtin.
-  if (!IsValidNonBuiltinContext(function->unchecked_context())) {
+  if (!IsValidNonBuiltinContext(function->context())) {
     return false;
   }
 
@@ -682,20 +733,12 @@
     return false;
   }
 
-  if (FLAG_age_code) {
-    return shared_info->code()->IsOld();
-  } else {
-    // How many collections newly compiled code object will survive before being
-    // flushed.
-    static const int kCodeAgeThreshold = 5;
-
-    // Age this shared function info.
-    if (shared_info->code_age() < kCodeAgeThreshold) {
-      shared_info->set_code_age(shared_info->code_age() + 1);
-      return false;
-    }
-    return true;
+  // Check the age of the code. If code aging is disabled, we never flush.
+  if (!FLAG_age_code || !shared_info->code()->IsOld()) {
+    return false;
   }
+
+  return true;
 }
 
 
diff --git a/src/objects-visiting.cc b/src/objects-visiting.cc
index 686cacf..6502209 100644
--- a/src/objects-visiting.cc
+++ b/src/objects-visiting.cc
@@ -143,6 +143,9 @@
     case JS_TYPED_ARRAY_TYPE:
       return kVisitJSTypedArray;
 
+    case JS_DATA_VIEW_TYPE:
+      return kVisitJSDataView;
+
     case JS_OBJECT_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index f347c87..c2ab45d 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -95,6 +95,7 @@
   V(JSWeakMap)                \
   V(JSArrayBuffer)            \
   V(JSTypedArray)             \
+  V(JSDataView)               \
   V(JSRegExp)
 
   // For data objects, JS objects and structs along with generic visitor which
@@ -338,6 +339,7 @@
 
   INLINE(static int VisitJSArrayBuffer(Map* map, HeapObject* object));
   INLINE(static int VisitJSTypedArray(Map* map, HeapObject* object));
+  INLINE(static int VisitJSDataView(Map* map, HeapObject* object));
 
   class DataObjectVisitor {
    public:
@@ -393,6 +395,7 @@
     table_.GetVisitor(map)(map, obj);
   }
 
+  INLINE(static void VisitPropertyCell(Map* map, HeapObject* object));
   INLINE(static void VisitCodeEntry(Heap* heap, Address entry_address));
   INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
   INLINE(static void VisitCell(Heap* heap, RelocInfo* rinfo));
@@ -415,6 +418,7 @@
   INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
   INLINE(static void VisitJSArrayBuffer(Map* map, HeapObject* object));
   INLINE(static void VisitJSTypedArray(Map* map, HeapObject* object));
+  INLINE(static void VisitJSDataView(Map* map, HeapObject* object));
   INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
 
   // Mark pointers in a Map and its TransitionArray together, possibly
diff --git a/src/objects.cc b/src/objects.cc
index e9b7ec2..d1a617e 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -47,7 +47,6 @@
 #include "safepoint-table.h"
 #include "string-stream.h"
 #include "utils.h"
-#include "vm-state-inl.h"
 
 #ifdef ENABLE_DISASSEMBLER
 #include "disasm.h"
@@ -375,14 +374,8 @@
     Handle<String> key(String::cast(name));
     LOG(isolate, ApiNamedPropertyAccess("load", self, name));
     PropertyCallbackArguments args(isolate, data->data(), self, this);
-    v8::Handle<v8::Value> result;
-    {
-      // Leaving JavaScript.
-      VMState<EXTERNAL> state(isolate);
-      ExternalCallbackScope call_scope(isolate,
-                                       v8::ToCData<Address>(fun_obj));
-      result = args.Call(call_fun, v8::Utils::ToLocal(key));
-    }
+    v8::Handle<v8::Value> result =
+        args.Call(call_fun, v8::Utils::ToLocal(key));
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     if (result.IsEmpty()) {
       return isolate->heap()->undefined_value();
@@ -668,8 +661,7 @@
     Object* store_value = value;
     if (IsGlobalObject()) {
       Heap* heap = name->GetHeap();
-      MaybeObject* maybe_store_value =
-          heap->AllocatePropertyCell(value);
+      MaybeObject* maybe_store_value = heap->AllocatePropertyCell(value);
       if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
     }
     Object* dict;
@@ -1631,6 +1623,7 @@
     case JS_ARRAY_TYPE:
     case JS_ARRAY_BUFFER_TYPE:
     case JS_TYPED_ARRAY_TYPE:
+    case JS_DATA_VIEW_TYPE:
     case JS_SET_TYPE:
     case JS_MAP_TYPE:
     case JS_WEAK_MAP_TYPE:
@@ -2736,18 +2729,13 @@
     PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
     v8::NamedPropertySetter setter =
         v8::ToCData<v8::NamedPropertySetter>(interceptor->setter());
-    v8::Handle<v8::Value> result;
-    {
-      // Leaving JavaScript.
-      VMState<EXTERNAL> state(isolate);
-      Handle<Object> value_unhole(value->IsTheHole() ?
-                                  isolate->heap()->undefined_value() :
-                                  value,
-                                  isolate);
-      result = args.Call(setter,
-                         v8::Utils::ToLocal(name_handle),
-                         v8::Utils::ToLocal(value_unhole));
-    }
+    Handle<Object> value_unhole(value->IsTheHole() ?
+                                isolate->heap()->undefined_value() :
+                                value,
+                                isolate);
+    v8::Handle<v8::Value> result = args.Call(setter,
+                                             v8::Utils::ToLocal(name_handle),
+                                             v8::Utils::ToLocal(value_unhole));
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     if (!result.IsEmpty()) return *value_handle;
   }
@@ -2846,17 +2834,11 @@
     if (call_fun == NULL) return value;
     Handle<String> key(String::cast(name));
     LOG(isolate, ApiNamedPropertyAccess("store", this, name));
-    PropertyCallbackArguments
-      args(isolate, data->data(), this, JSObject::cast(holder));
-    {
-      // Leaving JavaScript.
-      VMState<EXTERNAL> state(isolate);
-      ExternalCallbackScope call_scope(isolate,
-                                       v8::ToCData<Address>(call_obj));
-      args.Call(call_fun,
-                v8::Utils::ToLocal(key),
-                v8::Utils::ToLocal(value_handle));
-    }
+    PropertyCallbackArguments args(
+        isolate, data->data(), this, JSObject::cast(holder));
+    args.Call(call_fun,
+              v8::Utils::ToLocal(key),
+              v8::Utils::ToLocal(value_handle));
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     return *value_handle;
   }
@@ -4187,12 +4169,8 @@
         v8::ToCData<v8::NamedPropertyQuery>(interceptor->query());
     LOG(isolate,
         ApiNamedPropertyAccess("interceptor-named-has", *holder_handle, name));
-    v8::Handle<v8::Integer> result;
-    {
-      // Leaving JavaScript.
-      VMState<EXTERNAL> state(isolate);
-      result = args.Call(query, v8::Utils::ToLocal(name_handle));
-    }
+    v8::Handle<v8::Integer> result =
+        args.Call(query, v8::Utils::ToLocal(name_handle));
     if (!result.IsEmpty()) {
       ASSERT(result->IsInt32());
       return static_cast<PropertyAttributes>(result->Int32Value());
@@ -4202,12 +4180,8 @@
         v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
     LOG(isolate,
         ApiNamedPropertyAccess("interceptor-named-get-has", this, name));
-    v8::Handle<v8::Value> result;
-    {
-      // Leaving JavaScript.
-      VMState<EXTERNAL> state(isolate);
-      result = args.Call(getter, v8::Utils::ToLocal(name_handle));
-    }
+    v8::Handle<v8::Value> result =
+        args.Call(getter, v8::Utils::ToLocal(name_handle));
     if (!result.IsEmpty()) return DONT_ENUM;
   }
   return holder_handle->GetPropertyAttributePostInterceptor(*receiver_handle,
@@ -4327,12 +4301,7 @@
         v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
     LOG(isolate,
         ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
-    v8::Handle<v8::Integer> result;
-    {
-      // Leaving JavaScript.
-      VMState<EXTERNAL> state(isolate);
-      result = args.Call(query, index);
-    }
+    v8::Handle<v8::Integer> result = args.Call(query, index);
     if (!result.IsEmpty())
       return static_cast<PropertyAttributes>(result->Int32Value());
   } else if (!interceptor->getter()->IsUndefined()) {
@@ -4340,12 +4309,7 @@
         v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
     LOG(isolate,
         ApiIndexedPropertyAccess("interceptor-indexed-get-has", this, index));
-    v8::Handle<v8::Value> result;
-    {
-      // Leaving JavaScript.
-      VMState<EXTERNAL> state(isolate);
-      result = args.Call(getter, index);
-    }
+    v8::Handle<v8::Value> result = args.Call(getter, index);
     if (!result.IsEmpty()) return NONE;
   }
 
@@ -4769,7 +4733,7 @@
     // For a proxy, use the prototype as target object.
     Object* proxy_parent = GetPrototype();
     // If the proxy is detached, return undefined.
-    if (proxy_parent->IsNull()) return GetHeap()->undefined_value();
+    if (proxy_parent->IsNull()) return GetHeap()->the_hole_value();
     ASSERT(proxy_parent->IsJSGlobalObject());
     return JSObject::cast(proxy_parent)->GetHiddenProperty(key);
   }
@@ -4783,15 +4747,14 @@
     if (key == GetHeap()->identity_hash_string()) {
       return inline_value;
     } else {
-      return GetHeap()->undefined_value();
+      return GetHeap()->the_hole_value();
     }
   }
 
-  if (inline_value->IsUndefined()) return GetHeap()->undefined_value();
+  if (inline_value->IsUndefined()) return GetHeap()->the_hole_value();
 
   ObjectHashTable* hashtable = ObjectHashTable::cast(inline_value);
   Object* entry = hashtable->Lookup(key);
-  if (entry->IsTheHole()) return GetHeap()->undefined_value();
   return entry;
 }
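Returning the hole instead of undefined for a missing hidden property lets callers tell a key that was never set apart from one explicitly stored as undefined; note the dropped IsTheHole() check above, so a hashtable miss now propagates the hole directly. A toy model of the new contract (the enum and table are illustrative only):

    #include <cassert>
    #include <map>
    #include <string>

    enum Value { kUndefined, kTheHole, kSomeObject };

    struct HiddenProps {
      std::map<std::string, Value> table;
      Value Get(const std::string& key) const {
        std::map<std::string, Value>::const_iterator it = table.find(key);
        return it == table.end() ? kTheHole : it->second;  // miss => the hole
      }
    };

    int main() {
      HiddenProps props;
      props.table["a"] = kUndefined;         // explicitly stored undefined
      assert(props.Get("a") == kUndefined);  // set, and its value is undefined
      assert(props.Get("b") == kTheHole);    // never set at all
    }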
 
@@ -5015,12 +4978,8 @@
     LOG(isolate,
         ApiNamedPropertyAccess("interceptor-named-delete", *this_handle, name));
     PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
-    v8::Handle<v8::Boolean> result;
-    {
-      // Leaving JavaScript.
-      VMState<EXTERNAL> state(isolate);
-      result = args.Call(deleter, v8::Utils::ToLocal(name_handle));
-    }
+    v8::Handle<v8::Boolean> result =
+        args.Call(deleter, v8::Utils::ToLocal(name_handle));
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     if (!result.IsEmpty()) {
       ASSERT(result->IsBoolean());
@@ -5051,12 +5010,7 @@
   LOG(isolate,
       ApiIndexedPropertyAccess("interceptor-indexed-delete", this, index));
   PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
-  v8::Handle<v8::Boolean> result;
-  {
-    // Leaving JavaScript.
-    VMState<EXTERNAL> state(isolate);
-    result = args.Call(deleter, index);
-  }
+  v8::Handle<v8::Boolean> result = args.Call(deleter, index);
   RETURN_IF_SCHEDULED_EXCEPTION(isolate);
   if (!result.IsEmpty()) {
     ASSERT(result->IsBoolean());
@@ -9186,7 +9140,8 @@
 
 
 void JSFunction::MarkForLazyRecompilation() {
-  ASSERT(is_compiled() && !IsOptimized());
+  ASSERT(is_compiled() || GetIsolate()->debugger()->IsDebuggerActive());
+  ASSERT(!IsOptimized());
   ASSERT(shared()->allows_lazy_compilation() ||
          code()->optimizable());
   set_code_no_write_barrier(
@@ -9196,7 +9151,8 @@
 
 
 void JSFunction::MarkForParallelRecompilation() {
-  ASSERT(is_compiled() && !IsOptimized());
+  ASSERT(is_compiled() || GetIsolate()->debugger()->IsDebuggerActive());
+  ASSERT(!IsOptimized());
   ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
   if (!FLAG_parallel_recompilation) {
     JSFunction::MarkForLazyRecompilation();
@@ -9214,7 +9170,8 @@
 
 
 void JSFunction::MarkForInstallingRecompiledCode() {
-  ASSERT(is_compiled() && !IsOptimized());
+  ASSERT(is_compiled() || GetIsolate()->debugger()->IsDebuggerActive());
+  ASSERT(!IsOptimized());
   ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
   ASSERT(FLAG_parallel_recompilation);
   set_code_no_write_barrier(
@@ -9224,7 +9181,8 @@
 
 
 void JSFunction::MarkInRecompileQueue() {
-  ASSERT(is_compiled() && !IsOptimized());
+  ASSERT(is_compiled() || GetIsolate()->debugger()->IsDebuggerActive());
+  ASSERT(!IsOptimized());
   ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
   ASSERT(FLAG_parallel_recompilation);
   if (FLAG_trace_parallel_recompilation) {
@@ -9403,7 +9361,6 @@
   bool result = true;
   if (function->shared()->is_compiled()) {
     function->ReplaceCode(function->shared()->code());
-    function->shared()->set_code_age(0);
   } else {
     ASSERT(function->shared()->allows_lazy_compilation());
     CompilationInfoWithZone info(function);
@@ -10026,6 +9983,12 @@
       extended_extra_ic_state());
 }
 
+byte Code::compare_nil_value() {
+  ASSERT(is_compare_nil_ic_stub());
+  return CompareNilICStub::ExtractNilValueFromExtraICState(
+      extended_extra_ic_state());
+}
+
 
 void Code::InvalidateRelocation() {
   set_relocation_info(GetHeap()->empty_byte_array());
@@ -10150,29 +10113,45 @@
 }
 
 
-Map* Code::FindFirstMap() {
+Object* Code::FindNthObject(int n, Map* match_map) {
   ASSERT(is_inline_cache_stub());
   DisallowHeapAllocation no_allocation;
   int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
   for (RelocIterator it(this, mask); !it.done(); it.next()) {
     RelocInfo* info = it.rinfo();
     Object* object = info->target_object();
-    if (object->IsMap()) return Map::cast(object);
+    if (object->IsHeapObject()) {
+      if (HeapObject::cast(object)->map() == match_map) {
+        if (--n == 0) return object;
+      }
+    }
   }
   return NULL;
 }
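FindNthObject generalizes the old first-map lookup: it walks the embedded-object reloc entries and uses a countdown (--n == 0) to select the n-th object whose map matches, so FindFirstMap becomes FindNthObject(1, meta_map). The same idiom reappears in ReplaceNthObject and ReplaceNthCell below. A generic sketch of the countdown selection:

    #include <cassert>
    #include <vector>

    // Countdown idiom: walk a sequence, decrement n on each match, and stop
    // at the n-th matching element (n is 1-based).
    template <class It, class Pred>
    It FindNth(It begin, It end, int n, Pred match) {
      for (It it = begin; it != end; ++it) {
        if (match(*it) && --n == 0) return it;
      }
      return end;  // fewer than n matches
    }

    int main() {
      std::vector<int> v = {1, 4, 2, 4, 4};
      std::vector<int>::iterator it =
          FindNth(v.begin(), v.end(), 2, [](int x) { return x == 4; });
      assert(it - v.begin() == 3);  // the second 4 sits at index 3
    }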
 
 
-void Code::ReplaceFirstMap(Map* replace_with) {
+Map* Code::FindFirstMap() {
+  Object* result = FindNthObject(1, GetHeap()->meta_map());
+  return (result != NULL) ? Map::cast(result) : NULL;
+}
+
+
+void Code::ReplaceNthObject(int n,
+                            Map* match_map,
+                            Object* replace_with) {
   ASSERT(is_inline_cache_stub());
   DisallowHeapAllocation no_allocation;
   int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
   for (RelocIterator it(this, mask); !it.done(); it.next()) {
     RelocInfo* info = it.rinfo();
     Object* object = info->target_object();
-    if (object->IsMap()) {
-      info->set_target_object(replace_with);
-      return;
+    if (object->IsHeapObject()) {
+      if (HeapObject::cast(object)->map() == match_map) {
+        if (--n == 0) {
+          info->set_target_object(replace_with);
+          return;
+        }
+      }
     }
   }
   UNREACHABLE();
@@ -10191,6 +10170,11 @@
 }
 
 
+void Code::ReplaceFirstMap(Map* replace_with) {
+  ReplaceNthObject(1, GetHeap()->meta_map(), replace_with);
+}
+
+
 Code* Code::FindFirstCode() {
   ASSERT(is_inline_cache_stub());
   DisallowHeapAllocation no_allocation;
@@ -10232,6 +10216,21 @@
 }
 
 
+void Code::ReplaceNthCell(int n, Cell* replace_with) {
+  ASSERT(is_inline_cache_stub());
+  DisallowHeapAllocation no_allocation;
+  int mask = RelocInfo::ModeMask(RelocInfo::CELL);
+  for (RelocIterator it(this, mask); !it.done(); it.next()) {
+    RelocInfo* info = it.rinfo();
+    if (--n == 0) {
+      info->set_target_cell(replace_with);
+      return;
+    }
+  }
+  UNREACHABLE();
+}
+
+
 void Code::ClearInlineCaches() {
   int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
              RelocInfo::ModeMask(RelocInfo::CONSTRUCT_CALL) |
@@ -10389,21 +10388,10 @@
 // Identify kind of code.
 const char* Code::Kind2String(Kind kind) {
   switch (kind) {
-    case FUNCTION: return "FUNCTION";
-    case OPTIMIZED_FUNCTION: return "OPTIMIZED_FUNCTION";
-    case STUB: return "STUB";
-    case BUILTIN: return "BUILTIN";
-    case LOAD_IC: return "LOAD_IC";
-    case KEYED_LOAD_IC: return "KEYED_LOAD_IC";
-    case STORE_IC: return "STORE_IC";
-    case KEYED_STORE_IC: return "KEYED_STORE_IC";
-    case CALL_IC: return "CALL_IC";
-    case KEYED_CALL_IC: return "KEYED_CALL_IC";
-    case UNARY_OP_IC: return "UNARY_OP_IC";
-    case BINARY_OP_IC: return "BINARY_OP_IC";
-    case COMPARE_IC: return "COMPARE_IC";
-    case COMPARE_NIL_IC: return "COMPARE_NIL_IC";
-    case TO_BOOLEAN_IC: return "TO_BOOLEAN_IC";
+#define CASE(name) case name: return #name;
+    CODE_KIND_LIST(CASE)
+#undef CASE
+    case NUMBER_OF_KINDS: break;
   }
   UNREACHABLE();
   return NULL;
@@ -10498,9 +10486,6 @@
           break;
         }
 
-        case Translation::DUPLICATE:
-          break;
-
         case Translation::REGISTER: {
           int reg_code = iterator.Next();
             PrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code));
@@ -11114,7 +11099,7 @@
   Handle<DependentCode> codes =
       DependentCode::Insert(dep, group, info->object_wrapper());
   if (*codes != dependent_code()) set_dependent_code(*codes);
-  info->dependent_maps(group)->Add(Handle<Map>(this), info->zone());
+  info->dependencies(group)->Add(Handle<HeapObject>(this), info->zone());
 }
 
 
@@ -11140,6 +11125,16 @@
 }
 
 
+DependentCode* DependentCode::ForObject(Handle<HeapObject> object,
+                                        DependencyGroup group) {
+  AllowDeferredHandleDereference dependencies_are_safe;
+  if (group == DependentCode::kPropertyCellChangedGroup) {
+    return Handle<PropertyCell>::cast(object)->dependent_code();
+  }
+  return Handle<Map>::cast(object)->dependent_code();
+}
+
+
 Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
                                             DependencyGroup group,
                                             Handle<Object> object) {
@@ -11278,7 +11273,7 @@
       code->set_marked_for_deoptimization(true);
     } else {
       CompilationInfo* info = compilation_info_at(i);
-      info->AbortDueToDependentMap();
+      info->AbortDueToDependencyChange();
     }
   }
   // Compact the array by moving all subsequent groups to fill in the new holes.
@@ -11465,12 +11460,8 @@
     LOG(isolate,
         ApiIndexedPropertyAccess("interceptor-indexed-set", this, index));
     PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
-    v8::Handle<v8::Value> result;
-    {
-      // Leaving JavaScript.
-      VMState<EXTERNAL> state(isolate);
-      result = args.Call(setter, index, v8::Utils::ToLocal(value_handle));
-    }
+    v8::Handle<v8::Value> result =
+        args.Call(setter, index, v8::Utils::ToLocal(value_handle));
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     if (!result.IsEmpty()) return *value_handle;
   }
@@ -11508,12 +11499,7 @@
     LOG(isolate, ApiNamedPropertyAccess("load", *self, *key));
     PropertyCallbackArguments
         args(isolate, data->data(), *self, *holder_handle);
-    v8::Handle<v8::Value> result;
-    {
-      // Leaving JavaScript.
-      VMState<EXTERNAL> state(isolate);
-      result = args.Call(call_fun, v8::Utils::ToLocal(key));
-    }
+    v8::Handle<v8::Value> result = args.Call(call_fun, v8::Utils::ToLocal(key));
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     if (result.IsEmpty()) return isolate->heap()->undefined_value();
     Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
@@ -11575,13 +11561,9 @@
     LOG(isolate, ApiNamedPropertyAccess("store", *self, *key));
     PropertyCallbackArguments
         args(isolate, data->data(), *self, *holder_handle);
-    {
-      // Leaving JavaScript.
-      VMState<EXTERNAL> state(isolate);
-      args.Call(call_fun,
-                v8::Utils::ToLocal(key),
-                v8::Utils::ToLocal(value_handle));
-    }
+    args.Call(call_fun,
+              v8::Utils::ToLocal(key),
+              v8::Utils::ToLocal(value_handle));
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     return *value_handle;
   }
@@ -12481,12 +12463,7 @@
         ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
     PropertyCallbackArguments
         args(isolate, interceptor->data(), receiver, this);
-    v8::Handle<v8::Value> result;
-    {
-      // Leaving JavaScript.
-      VMState<EXTERNAL> state(isolate);
-      result = args.Call(getter, index);
-    }
+    v8::Handle<v8::Value> result = args.Call(getter, index);
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     if (!result.IsEmpty()) {
       Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
@@ -12791,12 +12768,8 @@
         ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name));
     PropertyCallbackArguments
         args(isolate, interceptor->data(), receiver, this);
-    v8::Handle<v8::Value> result;
-    {
-      // Leaving JavaScript.
-      VMState<EXTERNAL> state(isolate);
-      result = args.Call(getter, v8::Utils::ToLocal(name_handle));
-    }
+    v8::Handle<v8::Value> result =
+        args.Call(getter, v8::Utils::ToLocal(name_handle));
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     if (!result.IsEmpty()) {
       *attributes = NONE;
@@ -15805,9 +15778,19 @@
 }
 
 
-void JSTypedArray::Neuter() {
+void JSArrayBufferView::NeuterView() {
   set_byte_offset(Smi::FromInt(0));
   set_byte_length(Smi::FromInt(0));
+}
+
+
+void JSDataView::Neuter() {
+  NeuterView();
+}
+
+
+void JSTypedArray::Neuter() {
+  NeuterView();
   set_length(Smi::FromInt(0));
   set_elements(GetHeap()->EmptyExternalArrayForMap(map()));
 }
@@ -15823,4 +15806,23 @@
 }
 
 
+void PropertyCell::AddDependentCompilationInfo(CompilationInfo* info) {
+  Handle<DependentCode> dep(dependent_code());
+  Handle<DependentCode> codes =
+      DependentCode::Insert(dep, DependentCode::kPropertyCellChangedGroup,
+                            info->object_wrapper());
+  if (*codes != dependent_code()) set_dependent_code(*codes);
+  info->dependencies(DependentCode::kPropertyCellChangedGroup)->Add(
+      Handle<HeapObject>(this), info->zone());
+}
+
+
+void PropertyCell::AddDependentCode(Handle<Code> code) {
+  Handle<DependentCode> codes = DependentCode::Insert(
+      Handle<DependentCode>(dependent_code()),
+      DependentCode::kPropertyCellChangedGroup, code);
+  if (*codes != dependent_code()) set_dependent_code(*codes);
+}
+
+
 } }  // namespace v8::internal
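
The kPropertyCellChangedGroup changes above let optimized code register a dependency on a global property cell's value staying constant; when the value changes, every code object registered in the group is deoptimized, exactly as for the map-based groups. A minimal standalone sketch of the pattern, with illustrative names (V8's real DependentCode is a flat FixedArray with per-group start indexes, not a map of vectors):

  // Minimal sketch of a dependent-code registry keyed by dependency group.
  #include <cstdio>
  #include <map>
  #include <vector>

  enum DependencyGroup { kPrototypeCheckGroup, kPropertyCellChangedGroup };

  struct CodeObject {
    const char* name;
    bool marked_for_deoptimization;
  };

  class DependentCodeRegistry {
   public:
    void Insert(DependencyGroup group, CodeObject* code) {
      groups_[group].push_back(code);
    }
    // Called when e.g. a property cell's value changes: every code object
    // that registered a dependency in the group gets deoptimized.
    void DeoptimizeGroup(DependencyGroup group) {
      for (CodeObject* code : groups_[group]) {
        code->marked_for_deoptimization = true;
        std::printf("deopt: %s\n", code->name);
      }
      groups_[group].clear();
    }
   private:
    std::map<DependencyGroup, std::vector<CodeObject*> > groups_;
  };

  int main() {
    DependentCodeRegistry registry;
    CodeObject opt = {"optimized code assuming a constant global", false};
    registry.Insert(kPropertyCellChangedGroup, &opt);
    registry.DeoptimizeGroup(kPropertyCellChangedGroup);  // value changed
  }
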
diff --git a/src/objects.h b/src/objects.h
index 1d02ed7..adaa9cf 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -58,7 +58,9 @@
 //         - JSObject
 //           - JSArray
 //           - JSArrayBuffer
-//           - JSTypedArray
+//           - JSArrayBufferView
+//              - JSTypedArray
+//              - JSDataView
 //           - JSSet
 //           - JSMap
 //           - JSWeakMap
@@ -408,6 +410,7 @@
   V(JS_ARRAY_TYPE)                                                             \
   V(JS_ARRAY_BUFFER_TYPE)                                                      \
   V(JS_TYPED_ARRAY_TYPE)                                                       \
+  V(JS_DATA_VIEW_TYPE)                                                         \
   V(JS_PROXY_TYPE)                                                             \
   V(JS_WEAK_MAP_TYPE)                                                          \
   V(JS_REGEXP_TYPE)                                                            \
@@ -572,8 +575,7 @@
 const uint32_t kNotStringTag = 0x80;
 
 // Bit 6 indicates that the object is an internalized string (if set) or not.
-// There are not enough types that the non-string types (with bit 7 set) can
-// have bit 6 set too.
+// Bit 7 has to be clear as well.
 const uint32_t kIsInternalizedMask = 0x40;
 const uint32_t kNotInternalizedTag = 0x0;
 const uint32_t kInternalizedTag = 0x40;
@@ -744,6 +746,7 @@
   JS_ARRAY_TYPE,
   JS_ARRAY_BUFFER_TYPE,
   JS_TYPED_ARRAY_TYPE,
+  JS_DATA_VIEW_TYPE,
   JS_SET_TYPE,
   JS_MAP_TYPE,
   JS_WEAK_MAP_TYPE,
@@ -993,7 +996,9 @@
   V(Boolean)                                   \
   V(JSArray)                                   \
   V(JSArrayBuffer)                             \
+  V(JSArrayBufferView)                         \
   V(JSTypedArray)                              \
+  V(JSDataView)                                \
   V(JSProxy)                                   \
   V(JSFunctionProxy)                           \
   V(JSSet)                                     \
@@ -1990,7 +1995,7 @@
                                           Handle<Object> value);
   // Returns a failure if a GC is required.
   MUST_USE_RESULT MaybeObject* SetHiddenProperty(Name* key, Object* value);
-  // Gets the value of a hidden property with the given key. Returns undefined
+  // Gets the value of a hidden property with the given key. Returns the hole
   // if the property doesn't exist (or if called on a detached proxy),
   // otherwise returns the value set for the key.
   Object* GetHiddenProperty(Name* key);
@@ -4433,38 +4438,40 @@
   // cache state, and arguments count.
   typedef uint32_t Flags;
 
-#define CODE_KIND_LIST(V) \
-  V(FUNCTION)             \
-  V(OPTIMIZED_FUNCTION)   \
-  V(STUB)                 \
-  V(BUILTIN)              \
-  V(LOAD_IC)              \
-  V(KEYED_LOAD_IC)        \
-  V(CALL_IC)              \
-  V(KEYED_CALL_IC)        \
-  V(STORE_IC)             \
-  V(KEYED_STORE_IC)       \
-  V(UNARY_OP_IC)          \
-  V(BINARY_OP_IC)         \
-  V(COMPARE_IC)           \
-  V(COMPARE_NIL_IC)       \
+#define NON_IC_KIND_LIST(V) \
+  V(FUNCTION)               \
+  V(OPTIMIZED_FUNCTION)     \
+  V(STUB)                   \
+  V(BUILTIN)                \
+  V(REGEXP)
+
+#define IC_KIND_LIST(V) \
+  V(LOAD_IC)            \
+  V(KEYED_LOAD_IC)      \
+  V(CALL_IC)            \
+  V(KEYED_CALL_IC)      \
+  V(STORE_IC)           \
+  V(KEYED_STORE_IC)     \
+  V(UNARY_OP_IC)        \
+  V(BINARY_OP_IC)       \
+  V(COMPARE_IC)         \
+  V(COMPARE_NIL_IC)     \
   V(TO_BOOLEAN_IC)
 
+#define CODE_KIND_LIST(V) \
+  NON_IC_KIND_LIST(V)     \
+  IC_KIND_LIST(V)
+
   enum Kind {
 #define DEFINE_CODE_KIND_ENUM(name) name,
     CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
 #undef DEFINE_CODE_KIND_ENUM
-
-    // Pseudo-kinds.
-    LAST_CODE_KIND = TO_BOOLEAN_IC,
-    REGEXP = BUILTIN,
-    FIRST_IC_KIND = LOAD_IC,
-    LAST_IC_KIND = TO_BOOLEAN_IC
+    NUMBER_OF_KINDS
   };
 
   // No more than 16 kinds. The value is currently encoded in four bits in
   // Flags.
-  STATIC_ASSERT(LAST_CODE_KIND < 16);
+  STATIC_ASSERT(NUMBER_OF_KINDS <= 16);
 
   static const char* Kind2String(Kind kind);
 
@@ -4484,10 +4491,6 @@
     PROTOTYPE_STUB
   };
 
-  enum {
-    NUMBER_OF_KINDS = LAST_IC_KIND + 1
-  };
-
   typedef int ExtraICState;
 
   static const ExtraICState kNoExtraICState = 0;
@@ -4663,6 +4666,7 @@
 
   // [compare_nil]: For kind COMPARE_NIL_IC tells what state the stub is in.
   byte compare_nil_state();
+  byte compare_nil_value();
 
   // [has_function_cache]: For kind STUB tells whether a function
   // cache is passed to the stub.
@@ -4680,6 +4684,10 @@
   // Get the safepoint entry for the given pc.
   SafepointEntry GetSafepointEntry(Address pc);
 
+  // Find the nth object in a stub with the specified map.
+  Object* FindNthObject(int n, Map* match_map);
+  void ReplaceNthObject(int n, Map* match_map, Object* replace_with);
+
   // Find the first map in an IC stub.
   Map* FindFirstMap();
   void FindAllMaps(MapHandleList* maps);
@@ -4692,6 +4700,8 @@
   // Find the first name in an IC stub.
   Name* FindFirstName();
 
+  void ReplaceNthCell(int n, Cell* replace_with);
+
   class ExtraICStateStrictMode: public BitField<StrictModeFlag, 0, 1> {};
   class ExtraICStateKeyedAccessStoreMode:
       public BitField<KeyedAccessStoreMode, 1, 4> {};  // NOLINT
@@ -4820,7 +4830,10 @@
   };
 #undef DECLARE_CODE_AGE_ENUM
 
-  // Code aging
+  // Code aging.  Indicates how many full GCs this code has survived without
+  // being entered through the prologue.  Used to determine when it is
+  // relatively safe to flush this code object and replace it with the lazy
+  // compilation stub.
   static void MakeCodeAgeSequenceYoung(byte* sequence);
   void MakeOlder(MarkingParity);
   static bool IsYoungSequence(byte* sequence);
@@ -5019,7 +5032,10 @@
     // Group of code that depends on elements not being added to objects with
     // this map.
     kElementsCantBeAddedGroup,
-    kGroupCount = kElementsCantBeAddedGroup + 1
+    // Group of code that depends on global property values in property cells
+    // not being changed.
+    kPropertyCellChangedGroup,
+    kGroupCount = kPropertyCellChangedGroup + 1
   };
 
   // Array for holding the index of the first code object of each group.
@@ -5061,6 +5077,9 @@
   inline void copy(int from, int to);
   static inline DependentCode* cast(Object* object);
 
+  static DependentCode* ForObject(Handle<HeapObject> object,
+                                  DependencyGroup group);
+
  private:
   // Make room at the end of the given group by moving out the first
   // code objects of the subsequent groups.
@@ -5473,13 +5492,6 @@
   int NumberOfDescribedProperties(DescriptorFlag which = OWN_DESCRIPTORS,
                                   PropertyAttributes filter = NONE);
 
-  // Returns the number of slots allocated for the initial properties
-  // backing storage for instances of this map.
-  int InitialPropertiesLength() {
-    return pre_allocated_property_fields() + unused_property_fields() -
-        inobject_properties();
-  }
-
   // Casting.
   static inline Map* cast(Object* obj);
 
@@ -5565,7 +5577,7 @@
   inline bool CanOmitPrototypeChecks();
 
   void AddDependentCompilationInfo(DependentCode::DependencyGroup group,
-                                        CompilationInfo* info);
+                                   CompilationInfo* info);
 
   void AddDependentCode(DependentCode::DependencyGroup group,
                         Handle<Code> code);
@@ -5751,7 +5763,7 @@
   DECL_ACCESSORS(name, Object)
 
   // [id]: the script id.
-  DECL_ACCESSORS(id, Object)
+  DECL_ACCESSORS(id, Smi)
 
   // [line_offset]: script line offset in resource from where it was extracted.
   DECL_ACCESSORS(line_offset, Smi)
@@ -5860,8 +5872,8 @@
   V(Math, min, MathMin)                             \
   V(Math, imul, MathImul)
 
-
 enum BuiltinFunctionId {
+  kArrayCode,
 #define DECLARE_FUNCTION_ID(ignored1, ignore2, name)    \
   k##name,
   FUNCTIONS_WITH_ID_LIST(DECLARE_FUNCTION_ID)
@@ -6139,14 +6151,6 @@
   // iteration by the debugger).
   DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation_without_context)
 
-  // Indicates how many full GCs this function has survived with assigned
-  // code object. Used to determine when it is relatively safe to flush
-  // this code object and replace it with lazy compilation stub.
-  // Age is reset when GC notices that the code object is referenced
-  // from the stack or compilation cache.
-  inline int code_age();
-  inline void set_code_age(int age);
-
   // Indicates whether optimizations have been disabled for this
   // shared function info. If a function is repeatedly optimized or if
   // we cannot optimize the function we disable optimization to avoid
@@ -6390,15 +6394,11 @@
   static const int kStartPositionMask  = ~((1 << kStartPositionShift) - 1);
 
   // Bit positions in compiler_hints.
-  static const int kCodeAgeSize = 3;
-  static const int kCodeAgeMask = (1 << kCodeAgeSize) - 1;
-
   enum CompilerHints {
     kAllowLazyCompilation,
     kAllowLazyCompilationWithoutContext,
     kLiveObjectsMayExist,
-    kCodeAgeShift,
-    kOptimizationDisabled = kCodeAgeShift + kCodeAgeSize,
+    kOptimizationDisabled,
     kStrictModeFunction,
     kExtendedModeFunction,
     kUsesArguments,
@@ -6578,7 +6578,6 @@
 
   // [context]: The context for this function.
   inline Context* context();
-  inline Object* unchecked_context();
   inline void set_context(Object* context);
 
   // [code]: The generated code object for this function.  Executed
@@ -6590,8 +6589,6 @@
   inline void set_code_no_write_barrier(Code* code);
   inline void ReplaceCode(Code* code);
 
-  inline Code* unchecked_code();
-
   // Tells whether this function is builtin.
   inline bool IsBuiltin();
 
@@ -8580,9 +8577,14 @@
 
 class PropertyCell: public Cell {
  public:
+  // [type]: type of the global property.
   Type* type();
   void set_type(Type* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
 
+  // [dependent_code]: dependent code that depends on the type of the global
+  // property.
+  DECL_ACCESSORS(dependent_code, DependentCode)
+
   // Casting.
   static inline PropertyCell* cast(Object* obj);
 
@@ -8596,12 +8598,19 @@
 
   // Layout description.
   static const int kTypeOffset = kValueOffset + kPointerSize;
-  static const int kSize = kTypeOffset + kPointerSize;
+  static const int kDependentCodeOffset = kTypeOffset + kPointerSize;
+  static const int kSize = kDependentCodeOffset + kPointerSize;
 
-  typedef FixedBodyDescriptor<
-      kValueOffset,
-      kTypeOffset + kPointerSize,
-      PropertyCell::kSize> BodyDescriptor;
+  static const int kPointerFieldsBeginOffset = kValueOffset;
+  static const int kPointerFieldsEndOffset = kDependentCodeOffset;
+
+  typedef FixedBodyDescriptor<kValueOffset,
+                              kSize,
+                              kSize> BodyDescriptor;
+
+  void AddDependentCompilationInfo(CompilationInfo* info);
+
+  void AddDependentCode(Handle<Code> code);
 
  private:
   DECL_ACCESSORS(type_raw, Object)
@@ -8826,8 +8835,8 @@
   // [weak_next]: linked list of array buffers.
   DECL_ACCESSORS(weak_next, Object)
 
-  // [weak_first_array]: weak linked list of typed arrays.
-  DECL_ACCESSORS(weak_first_array, Object)
+  // [weak_first_view]: weak linked list of views.
+  DECL_ACCESSORS(weak_first_view, Object)
 
   // Casting.
   static inline JSArrayBuffer* cast(Object* obj);
@@ -8843,8 +8852,8 @@
   static const int kByteLengthOffset = kBackingStoreOffset + kPointerSize;
   static const int kFlagOffset = kByteLengthOffset + kPointerSize;
   static const int kWeakNextOffset = kFlagOffset + kPointerSize;
-  static const int kWeakFirstArrayOffset = kWeakNextOffset + kPointerSize;
-  static const int kSize = kWeakFirstArrayOffset + kPointerSize;
+  static const int kWeakFirstViewOffset = kWeakNextOffset + kPointerSize;
+  static const int kSize = kWeakFirstViewOffset + kPointerSize;
 
   static const int kSizeWithInternalFields =
       kSize + v8::ArrayBuffer::kInternalFieldCount * kPointerSize;
@@ -8857,7 +8866,7 @@
 };
 
 
-class JSTypedArray: public JSObject {
+class JSArrayBufferView: public JSObject {
  public:
   // [buffer]: ArrayBuffer that backs this view.
   DECL_ACCESSORS(buffer, Object)
@@ -8868,12 +8877,33 @@
   // [byte_length]: length of the view in bytes.
   DECL_ACCESSORS(byte_length, Object)
 
-  // [length]: length of typed array in elements.
-  DECL_ACCESSORS(length, Object)
-
   // [weak_next]: linked list of views over the same array buffer.
   DECL_ACCESSORS(weak_next, Object)
 
+  // Casting.
+  static inline JSArrayBufferView* cast(Object* obj);
+
+  DECLARE_VERIFIER(JSArrayBufferView)
+
+  static const int kBufferOffset = JSObject::kHeaderSize;
+  static const int kByteOffsetOffset = kBufferOffset + kPointerSize;
+  static const int kByteLengthOffset = kByteOffsetOffset + kPointerSize;
+  static const int kWeakNextOffset = kByteLengthOffset + kPointerSize;
+  static const int kViewSize = kWeakNextOffset + kPointerSize;
+
+ protected:
+  void NeuterView();
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBufferView);
+};
+
+
+class JSTypedArray: public JSArrayBufferView {
+ public:
+  // [length]: length of typed array in elements.
+  DECL_ACCESSORS(length, Object)
+
   // Neutering. Only neuters this typed array.
   void Neuter();
 
@@ -8887,18 +8917,33 @@
   DECLARE_PRINTER(JSTypedArray)
   DECLARE_VERIFIER(JSTypedArray)
 
-  static const int kBufferOffset = JSObject::kHeaderSize;
-  static const int kByteOffsetOffset = kBufferOffset + kPointerSize;
-  static const int kByteLengthOffset = kByteOffsetOffset + kPointerSize;
-  static const int kLengthOffset = kByteLengthOffset + kPointerSize;
-  static const int kWeakNextOffset = kLengthOffset + kPointerSize;
-  static const int kSize = kWeakNextOffset + kPointerSize;
+  static const int kLengthOffset = kViewSize + kPointerSize;
+  static const int kSize = kLengthOffset + kPointerSize;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSTypedArray);
 };
 
 
+class JSDataView: public JSArrayBufferView {
+ public:
+  // Neutering. Only neuters this DataView.
+  void Neuter();
+
+  // Casting.
+  static inline JSDataView* cast(Object* obj);
+
+  // Dispatched behavior.
+  DECLARE_PRINTER(JSDataView)
+  DECLARE_VERIFIER(JSDataView)
+
+  static const int kSize = kViewSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSDataView);
+};
+
+
 // Foreign describes objects pointing from JavaScript to C structures.
 // Since they cannot contain references to JS HeapObjects they can be
 // placed in old_data_space.
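
The new hierarchy hoists the fields shared by typed arrays and DataViews (buffer, byte_offset, byte_length, weak_next) into JSArrayBufferView, so subclass offsets are expressed relative to kViewSize and NeuterView() clears only the shared part. A plain-struct analogue of the layout, illustrative only (real V8 objects are raw heap words addressed through the offset constants):

  #include <cstddef>
  #include <cstdio>

  // Shared view header: buffer, byte_offset, byte_length, weak_next.
  struct JSArrayBufferView {
    void* buffer;
    size_t byte_offset;
    size_t byte_length;
    void* weak_next;
    void NeuterView() { byte_offset = 0; byte_length = 0; }
  };

  // A typed array adds an element count on top of the view header.
  struct JSTypedArray : JSArrayBufferView {
    size_t length;
    void Neuter() { NeuterView(); length = 0; }
  };

  // A DataView adds nothing; neutering is just the shared part.
  struct JSDataView : JSArrayBufferView {
    void Neuter() { NeuterView(); }
  };

  int main() {
    std::printf("view header: %zu bytes, typed array: %zu bytes\n",
                sizeof(JSArrayBufferView), sizeof(JSTypedArray));
  }
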
diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compiler-thread.cc
index b2abc81..dbf9ad7 100644
--- a/src/optimizing-compiler-thread.cc
+++ b/src/optimizing-compiler-thread.cc
@@ -39,7 +39,9 @@
 
 void OptimizingCompilerThread::Run() {
 #ifdef DEBUG
-  thread_id_ = ThreadId::Current().ToInteger();
+  { ScopedLock lock(thread_id_mutex_);
+    thread_id_ = ThreadId::Current().ToInteger();
+  }
 #endif
   Isolate::SetIsolateThreadLocals(isolate_, NULL);
   DisallowHeapAllocation no_allocation;
@@ -89,8 +91,9 @@
   ASSERT(status != OptimizingCompiler::FAILED);
 
   // The function may have already been optimized by OSR.  Simply continue.
-  // Mark it for installing before queuing so that we can be sure of the write
-  // order: marking first and (after being queued) installing code second.
+  // Use a mutex to make sure that functions marked for install
+  // are always also queued.
+  ScopedLock mark_and_queue(install_mutex_);
   { Heap::RelocationLock relocation_lock(isolate_->heap());
     AllowHandleDereference ahd;
     optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
@@ -106,12 +109,15 @@
   stop_semaphore_->Wait();
 
   if (FLAG_parallel_recompilation_delay != 0) {
-    InstallOptimizedFunctions();
     // Barrier when loading queue length is not necessary since the write
     // happens in CompileNext on the same thread.
-    while (NoBarrier_Load(&queue_length_) > 0) {
-      CompileNext();
-      InstallOptimizedFunctions();
+    while (NoBarrier_Load(&queue_length_) > 0) CompileNext();
+    InstallOptimizedFunctions();
+  } else {
+    OptimizingCompiler* optimizing_compiler;
+    while (input_queue_.Dequeue(&optimizing_compiler)) {
+      // The optimizing compiler is allocated in the CompilationInfo's zone.
+      delete optimizing_compiler->info();
     }
   }
 
@@ -127,12 +133,13 @@
 void OptimizingCompilerThread::InstallOptimizedFunctions() {
   ASSERT(!IsOptimizerThread());
   HandleScope handle_scope(isolate_);
-  int functions_installed = 0;
-  while (!output_queue_.IsEmpty()) {
-    OptimizingCompiler* compiler;
-    output_queue_.Dequeue(&compiler);
+  OptimizingCompiler* compiler;
+  while (true) {
+    { // Memory barrier to ensure marked functions are queued.
+      ScopedLock marked_and_queued(install_mutex_);
+      if (!output_queue_.Dequeue(&compiler)) return;
+    }
     Compiler::InstallOptimizedCode(compiler);
-    functions_installed++;
   }
 }
 
@@ -151,6 +158,7 @@
 #ifdef DEBUG
 bool OptimizingCompilerThread::IsOptimizerThread() {
   if (!FLAG_parallel_recompilation) return false;
+  ScopedLock lock(thread_id_mutex_);
   return ThreadId::Current().ToInteger() == thread_id_;
 }
 #endif
diff --git a/src/optimizing-compiler-thread.h b/src/optimizing-compiler-thread.h
index 8cb5e2d..59c94cb 100644
--- a/src/optimizing-compiler-thread.h
+++ b/src/optimizing-compiler-thread.h
@@ -46,10 +46,12 @@
       Thread("OptimizingCompilerThread"),
 #ifdef DEBUG
       thread_id_(0),
+      thread_id_mutex_(OS::CreateMutex()),
 #endif
       isolate_(isolate),
       stop_semaphore_(OS::CreateSemaphore(0)),
       input_queue_semaphore_(OS::CreateSemaphore(0)),
+      install_mutex_(OS::CreateMutex()),
       time_spent_compiling_(0),
       time_spent_total_(0) {
     NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
@@ -88,6 +90,7 @@
  private:
 #ifdef DEBUG
   int thread_id_;
+  Mutex* thread_id_mutex_;
 #endif
 
   Isolate* isolate_;
@@ -95,6 +98,7 @@
   Semaphore* input_queue_semaphore_;
   UnboundQueue<OptimizingCompiler*> input_queue_;
   UnboundQueue<OptimizingCompiler*> output_queue_;
+  Mutex* install_mutex_;
   volatile AtomicWord stop_thread_;
   volatile Atomic32 queue_length_;
   int64_t time_spent_compiling_;
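
The install_mutex_ introduced above makes "mark for install" and "enqueue" a single atomic step, so the main thread can never observe a function marked for install that is not yet in the output queue; this is what lets Runtime_InstallRecompiledCode drop its retry loop below. The same pattern expressed with standard C++ threads (V8 of this era used its own ScopedLock and UnboundQueue, not std::):

  #include <cstdio>
  #include <mutex>
  #include <queue>
  #include <thread>

  struct Job { bool marked_for_install = false; };

  std::mutex install_mutex;
  std::queue<Job*> output_queue;

  // Compiler thread: mark and enqueue under one lock so the two writes
  // are never observed out of order by the installer.
  void CompileNext(Job* job) {
    std::lock_guard<std::mutex> lock(install_mutex);
    job->marked_for_install = true;
    output_queue.push(job);
  }

  // Main thread: dequeue under the same lock; installing happens outside it.
  void InstallOptimizedFunctions() {
    while (true) {
      Job* job;
      {
        std::lock_guard<std::mutex> lock(install_mutex);
        if (output_queue.empty()) return;
        job = output_queue.front();
        output_queue.pop();
      }
      std::printf("install (marked=%d)\n", job->marked_for_install);
    }
  }

  int main() {
    Job job;
    std::thread compiler(CompileNext, &job);
    compiler.join();
    InstallOptimizedFunctions();
  }
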
diff --git a/src/parser.cc b/src/parser.cc
index 996d677..b320299 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -566,7 +566,6 @@
 
 
 FunctionLiteral* Parser::ParseProgram() {
-  ZoneScope zone_scope(zone(), DONT_DELETE_ON_EXIT);
   HistogramTimerScope timer(isolate()->counters()->parse());
   Handle<String> source(String::cast(script_->source()));
   isolate()->counters()->total_parse_size()->Increment(source->length());
@@ -583,11 +582,11 @@
     ExternalTwoByteStringUtf16CharacterStream stream(
         Handle<ExternalTwoByteString>::cast(source), 0, source->length());
     scanner_.Initialize(&stream);
-    result = DoParseProgram(info(), source, &zone_scope);
+    result = DoParseProgram(info(), source);
   } else {
     GenericStringUtf16CharacterStream stream(source, 0, source->length());
     scanner_.Initialize(&stream);
-    result = DoParseProgram(info(), source, &zone_scope);
+    result = DoParseProgram(info(), source);
   }
 
   if (FLAG_trace_parse && result != NULL) {
@@ -608,8 +607,7 @@
 
 
 FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
-                                        Handle<String> source,
-                                        ZoneScope* zone_scope) {
+                                        Handle<String> source) {
   ASSERT(top_scope_ == NULL);
   ASSERT(target_stack_ == NULL);
   if (pre_parse_data_ != NULL) pre_parse_data_->Initialize();
@@ -690,15 +688,11 @@
   // Make sure the target stack is empty.
   ASSERT(target_stack_ == NULL);
 
-  // If there was a syntax error we have to get rid of the AST
-  // and it is not safe to do so before the scope has been deleted.
-  if (result == NULL) zone_scope->DeleteOnExit();
   return result;
 }
 
 
 FunctionLiteral* Parser::ParseLazy() {
-  ZoneScope zone_scope(zone(), DONT_DELETE_ON_EXIT);
   HistogramTimerScope timer(isolate()->counters()->parse_lazy());
   Handle<String> source(String::cast(script_->source()));
   isolate()->counters()->total_parse_size()->Increment(source->length());
@@ -713,12 +707,12 @@
         Handle<ExternalTwoByteString>::cast(source),
         shared_info->start_position(),
         shared_info->end_position());
-    result = ParseLazy(&stream, &zone_scope);
+    result = ParseLazy(&stream);
   } else {
     GenericStringUtf16CharacterStream stream(source,
                                              shared_info->start_position(),
                                              shared_info->end_position());
-    result = ParseLazy(&stream, &zone_scope);
+    result = ParseLazy(&stream);
   }
 
   if (FLAG_trace_parse && result != NULL) {
@@ -730,8 +724,7 @@
 }
 
 
-FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source,
-                                   ZoneScope* zone_scope) {
+FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
   Handle<SharedFunctionInfo> shared_info = info()->shared_info();
   scanner_.Initialize(source);
   ASSERT(top_scope_ == NULL);
@@ -779,10 +772,7 @@
   // Make sure the target stack is empty.
   ASSERT(target_stack_ == NULL);
 
-  // If there was a stack overflow we have to get rid of AST and it is
-  // not safe to do before scope has been deleted.
   if (result == NULL) {
-    zone_scope->DeleteOnExit();
     if (stack_overflow_) isolate()->StackOverflow();
   } else {
     Handle<String> inferred_name(shared_info->inferred_name());
@@ -889,8 +879,8 @@
       // Still processing directive prologue?
       if ((e_stat = stat->AsExpressionStatement()) != NULL &&
           (literal = e_stat->expression()->AsLiteral()) != NULL &&
-          literal->handle()->IsString()) {
-        Handle<String> directive = Handle<String>::cast(literal->handle());
+          literal->value()->IsString()) {
+        Handle<String> directive = Handle<String>::cast(literal->value());
 
         // Check "use strict" directive (ES5 14.1).
         if (top_scope_->is_classic_mode() &&
@@ -3067,10 +3057,10 @@
       Expression* y = ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
 
       // Compute some expressions involving only number literals.
-      if (x && x->AsLiteral() && x->AsLiteral()->handle()->IsNumber() &&
-          y && y->AsLiteral() && y->AsLiteral()->handle()->IsNumber()) {
-        double x_val = x->AsLiteral()->handle()->Number();
-        double y_val = y->AsLiteral()->handle()->Number();
+      if (x && x->AsLiteral() && x->AsLiteral()->value()->IsNumber() &&
+          y && y->AsLiteral() && y->AsLiteral()->value()->IsNumber()) {
+        double x_val = x->AsLiteral()->value()->Number();
+        double y_val = y->AsLiteral()->value()->Number();
 
         switch (op) {
           case Token::ADD:
@@ -3169,7 +3159,7 @@
     Expression* expression = ParseUnaryExpression(CHECK_OK);
 
     if (expression != NULL && (expression->AsLiteral() != NULL)) {
-      Handle<Object> literal = expression->AsLiteral()->handle();
+      Handle<Object> literal = expression->AsLiteral()->value();
       if (op == Token::NOT) {
         // Convert the literal to a boolean condition and negate it.
         bool condition = literal->BooleanValue();
@@ -3734,18 +3724,6 @@
 }
 
 
-bool CompileTimeValue::ArrayLiteralElementNeedsInitialization(
-    Expression* value) {
-  // If value is a literal the property value is already set in the
-  // boilerplate object.
-  if (value->AsLiteral() != NULL) return false;
-  // If value is a materialized literal the property value is already set
-  // in the boilerplate object if it is simple.
-  if (CompileTimeValue::IsCompileTimeValue(value)) return false;
-  return true;
-}
-
-
 Handle<FixedArray> CompileTimeValue::GetValue(Expression* expression) {
   Factory* factory = Isolate::Current()->factory();
   ASSERT(IsCompileTimeValue(expression));
@@ -3783,7 +3761,7 @@
 
 Handle<Object> Parser::GetBoilerplateValue(Expression* expression) {
   if (expression->AsLiteral() != NULL) {
-    return expression->AsLiteral()->handle();
+    return expression->AsLiteral()->value();
   }
   if (CompileTimeValue::IsCompileTimeValue(expression)) {
     return CompileTimeValue::GetValue(expression);
@@ -3896,7 +3874,7 @@
     // Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
     // value for COMPUTED properties, the real value is filled in at
     // runtime. The enumeration order is maintained.
-    Handle<Object> key = property->key()->handle();
+    Handle<Object> key = property->key()->value();
     Handle<Object> value = GetBoilerplateValue(property->value());
 
     // Ensure objects that may, at any point in time, contain fields with double
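
The parser hunks above fold binary operations whose operands are both number literals into a single literal node at parse time (the handle() to value() renames are mechanical). The folding itself is the classic pattern, sketched standalone:

  #include <cmath>
  #include <cstdio>

  enum Op { ADD, SUB, MUL, MOD };

  // Fold op(x, y) at parse time when both operands are numeric literals;
  // the parser then emits one literal node instead of a BinaryOperation.
  static double FoldNumberLiterals(Op op, double x, double y) {
    switch (op) {
      case ADD: return x + y;
      case SUB: return x - y;
      case MUL: return x * y;
      case MOD: return std::fmod(x, y);  // JS % is fmod, not integer modulo
    }
    return 0;  // unreachable
  }

  int main() {
    std::printf("3 + 4 folds to %g\n", FoldNumberLiterals(ADD, 3, 4));
  }
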
diff --git a/src/parser.h b/src/parser.h
index 38633b0..c3a7edf 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -562,8 +562,7 @@
   };
 
   FunctionLiteral* ParseLazy();
-  FunctionLiteral* ParseLazy(Utf16CharacterStream* source,
-                             ZoneScope* zone_scope);
+  FunctionLiteral* ParseLazy(Utf16CharacterStream* source);
 
   Isolate* isolate() { return isolate_; }
   Zone* zone() const { return zone_; }
@@ -571,8 +570,7 @@
 
   // Called by ParseProgram after setting up the scanner.
   FunctionLiteral* DoParseProgram(CompilationInfo* info,
-                                  Handle<String> source,
-                                  ZoneScope* zone_scope);
+                                  Handle<String> source);
 
   // Report syntax error
   void ReportUnexpectedToken(Token::Value token);
@@ -888,8 +886,6 @@
 
   static bool IsCompileTimeValue(Expression* expression);
 
-  static bool ArrayLiteralElementNeedsInitialization(Expression* value);
-
   // Get the value as a compile time value.
   static Handle<FixedArray> GetValue(Expression* expression);
 
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 35427d4..bda9f92 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -79,12 +79,6 @@
 }
 
 
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-  __asm__ __volatile__("" : : : "memory");
-  // An x86 store acts as a release barrier.
-  *ptr = value;
-}
-
 const char* OS::LocalTimezone(double time) {
   if (std::isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 4305ccb..e2c2c42 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -85,12 +85,6 @@
 }
 
 
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-  __asm__ __volatile__("" : : : "memory");
-  *ptr = value;
-}
-
-
 uint64_t OS::CpuFeaturesImpliedByPlatform() {
   return 0;  // FreeBSD runs on anything.
 }
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 22f2245..4a9bb7e 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -308,19 +308,6 @@
 }
 
 
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-#if (defined(V8_TARGET_ARCH_ARM) && defined(__arm__)) || \
-    (defined(V8_TARGET_ARCH_MIPS) && defined(__mips__))
-  // Only use on ARM or MIPS hardware.
-  MemoryBarrier();
-#else
-  __asm__ __volatile__("" : : : "memory");
-  // An x86 store acts as a release barrier.
-#endif
-  *ptr = value;
-}
-
-
 const char* OS::LocalTimezone(double time) {
   if (std::isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index eea1726..b21166d 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -295,12 +295,6 @@
 }
 
 
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-  OSMemoryBarrier();
-  *ptr = value;
-}
-
-
 const char* OS::LocalTimezone(double time) {
   if (std::isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 0a7cc80..f2d9dd4 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -117,13 +117,6 @@
 }
 
 
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-  __asm__ __volatile__("" : : : "memory");
-  // An x86 store acts as a release barrier.
-  *ptr = value;
-}
-
-
 const char* OS::LocalTimezone(double time) {
   if (std::isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 5fb28c8..4b0094f 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -111,12 +111,6 @@
 }
 
 
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-  __asm__ __volatile__("" : : : "memory");
-  *ptr = value;
-}
-
-
 const char* OS::LocalTimezone(double time) {
   if (std::isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 6795844..600962e 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1485,12 +1485,6 @@
 }
 
 
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-  MemoryBarrier();
-  *ptr = value;
-}
-
-
 VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 
diff --git a/src/platform.h b/src/platform.h
index 86706fe..24d21cb 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -100,7 +100,6 @@
 
 #endif  // WIN32
 
-#include "atomicops.h"
 #include "lazy-instance.h"
 #include "platform-tls.h"
 #include "utils.h"
@@ -330,8 +329,6 @@
   // the platform doesn't care. Guaranteed to be a power of two.
   static int ActivationFrameAlignment();
 
-  static void ReleaseStore(volatile AtomicWord* ptr, AtomicWord value);
-
 #if defined(V8_TARGET_ARCH_IA32)
   // Limit below which the extra overhead of the MemCopy function is likely
   // to outweigh the benefits of faster copying.
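
The per-platform OS::ReleaseStore implementations deleted above all encoded the same contract: a store that earlier memory operations cannot be reordered past (a compiler barrier suffices on x86; ARM and MIPS need a hardware barrier). In portable C++11 terms the contract is a release store paired with an acquire load, as in this sketch:

  #include <atomic>
  #include <cstdio>

  std::atomic<int> flag(0);
  int payload = 0;

  void Producer() {
    payload = 42;  // ordinary write...
    // ...made visible before the flag: the release store replaces the
    // hand-rolled compiler-barrier-then-plain-store idiom deleted above.
    flag.store(1, std::memory_order_release);
  }

  void Consumer() {
    if (flag.load(std::memory_order_acquire) == 1) {
      std::printf("payload=%d\n", payload);  // guaranteed to see 42
    }
  }

  int main() {
    Producer();
    Consumer();
  }
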
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 23cad95..1824efa 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -315,7 +315,7 @@
 
 
 void PrettyPrinter::VisitLiteral(Literal* node) {
-  PrintLiteral(node->handle(), true);
+  PrintLiteral(node->value(), true);
 }
 
 
@@ -379,11 +379,11 @@
 void PrettyPrinter::VisitProperty(Property* node) {
   Expression* key = node->key();
   Literal* literal = key->AsLiteral();
-  if (literal != NULL && literal->handle()->IsInternalizedString()) {
+  if (literal != NULL && literal->value()->IsInternalizedString()) {
     Print("(");
     Visit(node->obj());
     Print(").");
-    PrintLiteral(literal->handle(), false);
+    PrintLiteral(literal->value(), false);
   } else {
     Visit(node->obj());
     Print("[");
@@ -999,7 +999,7 @@
 
 // TODO(svenpanne) Start with IndentedScope.
 void AstPrinter::VisitLiteral(Literal* node) {
-  PrintLiteralIndented("LITERAL", node->handle(), true);
+  PrintLiteralIndented("LITERAL", node->value(), true);
 }
 
 
@@ -1102,8 +1102,8 @@
   IndentedScope indent(this, "PROPERTY");
   Visit(node->obj());
   Literal* literal = node->key()->AsLiteral();
-  if (literal != NULL && literal->handle()->IsInternalizedString()) {
-    PrintLiteralIndented("NAME", literal->handle(), false);
+  if (literal != NULL && literal->value()->IsInternalizedString()) {
+    PrintLiteralIndented("NAME", literal->value(), false);
   } else {
     PrintIndentedVisit("KEY", node->key());
   }
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h
index d6e8a37..6f9a601 100644
--- a/src/profile-generator-inl.h
+++ b/src/profile-generator-inl.h
@@ -46,9 +46,9 @@
 CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
                      const char* name_prefix,
                      const char* name,
+                     int security_token_id,
                      const char* resource_name,
-                     int line_number,
-                     int security_token_id)
+                     int line_number)
     : tag_(tag),
       name_prefix_(name_prefix),
       name_(name),
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index d15c400..8505b5b 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -183,7 +183,9 @@
   return size;
 }
 
+
 const char* const CodeEntry::kEmptyNamePrefix = "";
+const char* const CodeEntry::kEmptyResourceName = "";
 
 
 CodeEntry::~CodeEntry() {
@@ -296,12 +298,7 @@
 
 
 ProfileTree::ProfileTree()
-    : root_entry_(Logger::FUNCTION_TAG,
-                  "",
-                  "(root)",
-                  "",
-                  0,
-                  TokenEnumerator::kNoSecurityToken),
+    : root_entry_(Logger::FUNCTION_TAG, "", "(root)"),
       next_node_id_(1),
       root_(new ProfileNode(this, &root_entry_)) {
 }
@@ -797,9 +794,9 @@
   CodeEntry* entry = new CodeEntry(tag,
                                    CodeEntry::kEmptyNamePrefix,
                                    GetFunctionName(name),
+                                   TokenEnumerator::kNoSecurityToken,
                                    GetName(resource_name),
-                                   line_number,
-                                   TokenEnumerator::kNoSecurityToken);
+                                   line_number);
   code_entries_.Add(entry);
   return entry;
 }
@@ -809,10 +806,7 @@
                                                const char* name) {
   CodeEntry* entry = new CodeEntry(tag,
                                    CodeEntry::kEmptyNamePrefix,
-                                   GetFunctionName(name),
-                                   "",
-                                   v8::CpuProfileNode::kNoLineNumberInfo,
-                                   TokenEnumerator::kNoSecurityToken);
+                                   GetFunctionName(name));
   code_entries_.Add(entry);
   return entry;
 }
@@ -824,8 +818,6 @@
   CodeEntry* entry = new CodeEntry(tag,
                                    name_prefix,
                                    GetName(name),
-                                   "",
-                                   v8::CpuProfileNode::kNoLineNumberInfo,
                                    TokenEnumerator::kInheritsSecurityToken);
   code_entries_.Add(entry);
   return entry;
@@ -837,8 +829,6 @@
   CodeEntry* entry = new CodeEntry(tag,
                                    "args_count: ",
                                    GetName(args_count),
-                                   "",
-                                   v8::CpuProfileNode::kNoLineNumberInfo,
                                    TokenEnumerator::kInheritsSecurityToken);
   code_entries_.Add(entry);
   return entry;
@@ -910,7 +900,7 @@
     Address start;
     CodeEntry* pc_entry = code_map_.FindEntry(sample.pc, &start);
     // If pc is in the function code before it set up stack frame or after the
-    // frame was destroyed SafeStackTraceFrameIterator incorrectly thinks that
+    // frame was destroyed, SafeStackFrameIterator incorrectly thinks that
     // ebp contains return address of the current function and skips caller's
     // frame. Check for this case and just skip such samples.
     if (pc_entry) {
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 7a5e1f2..f8534e4 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -99,9 +99,9 @@
   INLINE(CodeEntry(Logger::LogEventsAndTags tag,
                    const char* name_prefix,
                    const char* name,
-                   const char* resource_name,
-                   int line_number,
-                   int security_token_id));
+                   int security_token_id = TokenEnumerator::kNoSecurityToken,
+                   const char* resource_name = CodeEntry::kEmptyResourceName,
+                   int line_number = v8::CpuProfileNode::kNoLineNumberInfo));
   ~CodeEntry();
 
   INLINE(bool is_js_function() const) { return is_js_function_tag(tag_); }
@@ -125,6 +125,7 @@
   bool IsSameAs(CodeEntry* entry) const;
 
   static const char* const kEmptyNamePrefix;
+  static const char* const kEmptyResourceName;
 
  private:
   Logger::LogEventsAndTags tag_;
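
Reordering CodeEntry's constructor parameters moves the rarely customized ones to the end so they can take defaults, which is what lets the call sites above shrink from six arguments to three. The idiom in miniature (illustrative types, not the real CodeEntry):

  #include <cstdio>
  #include <string>

  struct CodeEntry {
    // Trailing parameters default to the common case, so most call sites
    // pass only tag, prefix, and name.
    CodeEntry(int tag, std::string prefix, std::string name,
              int security_token_id = -1,      // like kNoSecurityToken
              std::string resource_name = "",  // like kEmptyResourceName
              int line_number = 0)             // like kNoLineNumberInfo
        : tag_(tag), prefix_(prefix), name_(name),
          security_token_id_(security_token_id),
          resource_name_(resource_name),
          line_number_(line_number) {}
    int tag_;
    std::string prefix_, name_;
    int security_token_id_;
    std::string resource_name_;
    int line_number_;
  };

  int main() {
    CodeEntry root(0, "", "(root)");  // was a six-argument call before
    std::printf("%s\n", root.name_.c_str());
  }
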
diff --git a/src/property.h b/src/property.h
index f36bc7a..5213ee6 100644
--- a/src/property.h
+++ b/src/property.h
@@ -112,7 +112,7 @@
                              JSFunction* function,
                              PropertyAttributes attributes)
       : Descriptor(key, function, attributes, CONSTANT_FUNCTION,
-                   Representation::Tagged()) {}
+                   Representation::HeapObject()) {}
 };
 
 
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 9b7dd34..00f7de4 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -153,9 +153,6 @@
 void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
   // See AlwaysFullCompiler (in compiler.cc) comment on why we need
   // Debug::has_break_points().
-  ASSERT(function->IsMarkedForLazyRecompilation() ||
-         function->IsMarkedForParallelRecompilation() ||
-         function->IsOptimized());
   if (!FLAG_use_osr ||
       isolate_->DebuggerHasBreakPoints() ||
       function->IsBuiltin()) {
@@ -273,12 +270,21 @@
     if (shared_code->kind() != Code::FUNCTION) continue;
     if (function->IsInRecompileQueue()) continue;
 
-    // Attempt OSR if we are still running unoptimized code even though the
-    // the function has long been marked or even already been optimized.
-    if (!frame->is_optimized() &&
+    if (FLAG_always_osr &&
+        shared_code->allow_osr_at_loop_nesting_level() == 0) {
+      // Testing mode: always try an OSR compile for every function.
+      for (int i = 0; i < Code::kMaxLoopNestingMarker; i++) {
+        // TODO(titzer): fix AttemptOnStackReplacement to avoid this dumb loop.
+        shared_code->set_allow_osr_at_loop_nesting_level(i);
+        AttemptOnStackReplacement(function);
+      }
+      // Fall through and do a normal optimized compile as well.
+    } else if (!frame->is_optimized() &&
         (function->IsMarkedForLazyRecompilation() ||
          function->IsMarkedForParallelRecompilation() ||
          function->IsOptimized())) {
+      // Attempt OSR if we are still running unoptimized code even though
+      // the function has long been marked or even already been optimized.
       int ticks = shared_code->profiler_ticks();
       int allowance = kOSRCodeSizeAllowanceBase +
                       ticks * kOSRCodeSizeAllowancePerTick;
diff --git a/src/runtime.cc b/src/runtime.cc
index 0b7a7ca..081774f 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -650,23 +650,17 @@
 }
 
 
-static void ArrayBufferWeakCallback(v8::Isolate* external_isolate,
-                                    Persistent<Value>* object,
-                                    void* data) {
-  Isolate* isolate = reinterpret_cast<Isolate*>(external_isolate);
-  HandleScope scope(isolate);
-  Handle<Object> internal_object = Utils::OpenPersistent(object);
-  Handle<JSArrayBuffer> array_buffer(JSArrayBuffer::cast(*internal_object));
+void Runtime::FreeArrayBuffer(Isolate* isolate,
+                              JSArrayBuffer* phantom_array_buffer) {
+  if (phantom_array_buffer->is_external()) return;
 
-  if (!array_buffer->is_external()) {
-    size_t allocated_length = NumberToSize(
-        isolate, array_buffer->byte_length());
-    isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
-        -static_cast<intptr_t>(allocated_length));
-    CHECK(V8::ArrayBufferAllocator() != NULL);
-    V8::ArrayBufferAllocator()->Free(data);
-  }
-  object->Dispose(external_isolate);
+  size_t allocated_length = NumberToSize(
+      isolate, phantom_array_buffer->byte_length());
+
+  isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
+      -static_cast<intptr_t>(allocated_length));
+  CHECK(V8::ArrayBufferAllocator() != NULL);
+  V8::ArrayBufferAllocator()->Free(phantom_array_buffer->backing_store());
 }
 
 
@@ -691,7 +685,7 @@
 
   array_buffer->set_weak_next(isolate->heap()->array_buffers_list());
   isolate->heap()->set_array_buffers_list(*array_buffer);
-  array_buffer->set_weak_first_array(isolate->heap()->undefined_value());
+  array_buffer->set_weak_first_view(isolate->heap()->undefined_value());
 }
 
 
@@ -711,10 +705,6 @@
 
   SetupArrayBuffer(isolate, array_buffer, false, data, allocated_length);
 
-  Handle<Object> persistent = isolate->global_handles()->Create(*array_buffer);
-  GlobalHandles::MakeWeak(persistent.location(), data, ArrayBufferWeakCallback);
-  GlobalHandles::MarkIndependent(persistent.location());
-
   isolate->heap()->AdjustAmountOfExternalAllocatedMemory(allocated_length);
 
   return true;
@@ -860,8 +850,8 @@
 
   Handle<Object> length_obj = isolate->factory()->NewNumberFromSize(length);
   holder->set_length(*length_obj);
-  holder->set_weak_next(buffer->weak_first_array());
-  buffer->set_weak_first_array(*holder);
+  holder->set_weak_next(buffer->weak_first_view());
+  buffer->set_weak_first_view(*holder);
 
   Handle<ExternalArray> elements =
       isolate->factory()->NewExternalArray(
@@ -1013,6 +1003,223 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewInitialize) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 4);
+  CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, byte_offset, 2);
+  CONVERT_ARG_HANDLE_CHECKED(Object, byte_length, 3);
+
+  holder->set_buffer(*buffer);
+  ASSERT(byte_offset->IsNumber());
+  ASSERT(
+      NumberToSize(isolate, buffer->byte_length()) >=
+        NumberToSize(isolate, *byte_offset)
+        + NumberToSize(isolate, *byte_length));
+  holder->set_byte_offset(*byte_offset);
+  ASSERT(byte_length->IsNumber());
+  holder->set_byte_length(*byte_length);
+
+  holder->set_weak_next(buffer->weak_first_view());
+  buffer->set_weak_first_view(*holder);
+
+  return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGetBuffer) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSDataView, data_view, 0);
+  return data_view->buffer();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGetByteOffset) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSDataView, data_view, 0);
+  return data_view->byte_offset();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGetByteLength) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSDataView, data_view, 0);
+  return data_view->byte_length();
+}
+
+
+inline static bool NeedToFlipBytes(bool is_little_endian) {
+#ifdef V8_TARGET_LITTLE_ENDIAN
+  return !is_little_endian;
+#else
+  return is_little_endian;
+#endif
+}
+
+
+template<int n>
+inline void CopyBytes(uint8_t* target, uint8_t* source) {
+  for (int i = 0; i < n; i++) {
+    *(target++) = *(source++);
+  }
+}
+
+
+template<int n>
+inline void FlipBytes(uint8_t* target, uint8_t* source) {
+  source = source + (n-1);
+  for (int i = 0; i < n; i++) {
+    *(target++) = *(source--);
+  }
+}
+
+
+template<typename T>
+inline static bool DataViewGetValue(
+    Isolate* isolate,
+    Handle<JSDataView> data_view,
+    Handle<Object> byte_offset_obj,
+    bool is_little_endian,
+    T* result) {
+  size_t byte_offset = NumberToSize(isolate, *byte_offset_obj);
+  Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
+
+  size_t data_view_byte_offset =
+      NumberToSize(isolate, data_view->byte_offset());
+  size_t data_view_byte_length =
+      NumberToSize(isolate, data_view->byte_length());
+  if (byte_offset + sizeof(T) > data_view_byte_length ||
+      byte_offset + sizeof(T) < byte_offset)  {  // overflow
+    return false;
+  }
+
+  union Value {
+    T data;
+    uint8_t bytes[sizeof(T)];
+  };
+
+  Value value;
+  size_t buffer_offset = data_view_byte_offset + byte_offset;
+  ASSERT(
+      NumberToSize(isolate, buffer->byte_length())
+      >= buffer_offset + sizeof(T));
+  uint8_t* source =
+        static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
+  if (NeedToFlipBytes(is_little_endian)) {
+    FlipBytes<sizeof(T)>(value.bytes, source);
+  } else {
+    CopyBytes<sizeof(T)>(value.bytes, source);
+  }
+  *result = value.data;
+  return true;
+}
+
+
+template<typename T>
+static bool DataViewSetValue(
+    Isolate* isolate,
+    Handle<JSDataView> data_view,
+    Handle<Object> byte_offset_obj,
+    bool is_little_endian,
+    T data) {
+  size_t byte_offset = NumberToSize(isolate, *byte_offset_obj);
+  Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
+
+  size_t data_view_byte_offset =
+      NumberToSize(isolate, data_view->byte_offset());
+  size_t data_view_byte_length =
+      NumberToSize(isolate, data_view->byte_length());
+  if (byte_offset + sizeof(T) > data_view_byte_length ||
+      byte_offset + sizeof(T) < byte_offset)  {  // overflow
+    return false;
+  }
+
+  union Value {
+    T data;
+    uint8_t bytes[sizeof(T)];
+  };
+
+  Value value;
+  value.data = data;
+  size_t buffer_offset = data_view_byte_offset + byte_offset;
+  ASSERT(
+      NumberToSize(isolate, buffer->byte_length())
+      >= buffer_offset + sizeof(T));
+  uint8_t* target =
+        static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
+  if (NeedToFlipBytes(is_little_endian)) {
+    FlipBytes<sizeof(T)>(target, value.bytes);
+  } else {
+    CopyBytes<sizeof(T)>(target, value.bytes);
+  }
+  return true;
+}
+
+
+#define DATA_VIEW_GETTER(TypeName, Type, Converter)                           \
+  RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewGet##TypeName) {             \
+    HandleScope scope(isolate);                                               \
+    ASSERT(args.length() == 3);                                               \
+    CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0);                        \
+    CONVERT_ARG_HANDLE_CHECKED(Object, offset, 1);                            \
+    CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 2);                         \
+    Type result;                                                              \
+    if (DataViewGetValue(                                                     \
+          isolate, holder, offset, is_little_endian, &result)) {              \
+      return isolate->heap()->Converter(result);                              \
+    } else {                                                                  \
+      return isolate->Throw(*isolate->factory()->NewRangeError(               \
+          "invalid_data_view_accessor_offset",                                \
+          HandleVector<Object>(NULL, 0)));                                    \
+    }                                                                         \
+  }
+
+DATA_VIEW_GETTER(Uint8, uint8_t, NumberFromUint32)
+DATA_VIEW_GETTER(Int8, int8_t, NumberFromInt32)
+DATA_VIEW_GETTER(Uint16, uint16_t, NumberFromUint32)
+DATA_VIEW_GETTER(Int16, int16_t, NumberFromInt32)
+DATA_VIEW_GETTER(Uint32, uint32_t, NumberFromUint32)
+DATA_VIEW_GETTER(Int32, int32_t, NumberFromInt32)
+DATA_VIEW_GETTER(Float32, float, NumberFromDouble)
+DATA_VIEW_GETTER(Float64, double, NumberFromDouble)
+
+#undef DATA_VIEW_GETTER
+
+#define DATA_VIEW_SETTER(TypeName, Type)                                      \
+  RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewSet##TypeName) {             \
+    HandleScope scope(isolate);                                               \
+    ASSERT(args.length() == 4);                                               \
+    CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0);                        \
+    CONVERT_ARG_HANDLE_CHECKED(Object, offset, 1);                            \
+    CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);                             \
+    CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 3);                         \
+    Type v = static_cast<Type>(value->Number());                              \
+    if (DataViewSetValue(                                                     \
+          isolate, holder, offset, is_little_endian, v)) {                    \
+      return isolate->heap()->undefined_value();                              \
+    } else {                                                                  \
+      return isolate->Throw(*isolate->factory()->NewRangeError(               \
+          "invalid_data_view_accessor_offset",                                \
+          HandleVector<Object>(NULL, 0)));                                    \
+    }                                                                         \
+  }
+
+DATA_VIEW_SETTER(Uint8, uint8_t)
+DATA_VIEW_SETTER(Int8, int8_t)
+DATA_VIEW_SETTER(Uint16, uint16_t)
+DATA_VIEW_SETTER(Int16, int16_t)
+DATA_VIEW_SETTER(Uint32, uint32_t)
+DATA_VIEW_SETTER(Int32, int32_t)
+DATA_VIEW_SETTER(Float32, float)
+DATA_VIEW_SETTER(Float64, double)
+
+#undef DATA_VIEW_SETTER
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SetInitialize) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
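
The DataView getters and setters added above read or write sizeof(T) raw bytes at the computed buffer offset, flipping them through a union when the requested endianness differs from the host's, and range-check with an explicit overflow guard (byte_offset + sizeof(T) < byte_offset). The core byte-swapping trick, standalone:

  #include <cstddef>
  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  // Host-endianness probe at runtime; V8 decides at compile time via
  // V8_TARGET_LITTLE_ENDIAN.
  static bool HostIsLittleEndian() {
    uint16_t probe = 1;
    uint8_t first;
    std::memcpy(&first, &probe, 1);
    return first == 1;
  }

  template <typename T>
  T DataViewGet(const uint8_t* backing_store, size_t offset,
                bool is_little_endian) {
    union { T data; uint8_t bytes[sizeof(T)]; } value;
    if (HostIsLittleEndian() != is_little_endian) {
      for (size_t i = 0; i < sizeof(T); i++)  // FlipBytes
        value.bytes[i] = backing_store[offset + sizeof(T) - 1 - i];
    } else {
      std::memcpy(value.bytes, backing_store + offset, sizeof(T));  // CopyBytes
    }
    return value.data;
  }

  int main() {
    uint8_t buf[4] = {0x12, 0x34, 0x56, 0x78};
    std::printf("%08x vs %08x\n",
                DataViewGet<uint32_t>(buf, 0, true),    // little-endian read
                DataViewGet<uint32_t>(buf, 0, false));  // big-endian read
  }
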
@@ -1493,7 +1700,7 @@
   CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
   CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
   Handle<Object> result =
-      RegExpImpl::Compile(re, pattern, flags, isolate->runtime_zone());
+      RegExpImpl::Compile(re, pattern, flags);
   if (result.is_null()) return Failure::Exception();
   return *result;
 }
@@ -2593,18 +2800,23 @@
 
   JavaScriptFrameIterator stack_iterator(isolate);
   JavaScriptFrame* frame = stack_iterator.frame();
-  JSFunction* function = JSFunction::cast(frame->function());
-  RUNTIME_ASSERT(function->shared()->is_generator());
-  ASSERT_EQ(function, generator_object->function());
+  RUNTIME_ASSERT(JSFunction::cast(frame->function())->shared()->is_generator());
+  ASSERT_EQ(JSFunction::cast(frame->function()), generator_object->function());
+
+  // The caller should have saved the context and continuation already.
+  ASSERT_EQ(generator_object->context(), Context::cast(frame->context()));
+  ASSERT_LT(0, generator_object->continuation());
 
   // We expect there to be at least two values on the operand stack: the return
   // value of the yield expression, and the argument to this runtime call.
   // Neither of those should be saved.
   int operands_count = frame->ComputeOperandsCount();
-  ASSERT(operands_count >= 2);
+  ASSERT_GE(operands_count, 2);
   operands_count -= 2;
 
   if (operands_count == 0) {
+    // Although it's semantically harmless to call this function with an
+    // operands_count of zero, it is also unnecessary.
     ASSERT_EQ(generator_object->operand_stack(),
               isolate->heap()->empty_fixed_array());
     ASSERT_EQ(generator_object->stack_handler_index(), -1);
@@ -2621,20 +2833,7 @@
     generator_object->set_stack_handler_index(stack_handler_index);
   }
 
-  // Set continuation down here to avoid side effects if the operand stack
-  // allocation fails.
-  intptr_t offset = frame->pc() - function->code()->instruction_start();
-  ASSERT(offset > 0 && Smi::IsValid(offset));
-  generator_object->set_continuation(static_cast<int>(offset));
-
-  // It's possible for the context to be other than the initial context even if
-  // there is no stack handler active.  For example, this is the case in the
-  // body of a "with" statement.  Therefore we always save the context.
-  generator_object->set_context(Context::cast(frame->context()));
-
-  // The return value is the hole for a suspend return, and anything else for a
-  // resume return.
-  return isolate->heap()->the_hole_value();
+  return isolate->heap()->undefined_value();
 }
 
 
@@ -3401,9 +3600,8 @@
   ASSERT(subject->IsFlat());
   ASSERT(replacement->IsFlat());
 
-  Zone* zone = isolate->runtime_zone();
-  ZoneScope zone_space(zone, DELETE_ON_EXIT);
-  ZoneList<int> indices(8, zone);
+  Zone zone(isolate);
+  ZoneList<int> indices(8, &zone);
   ASSERT_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag());
   String* pattern =
       String::cast(pattern_regexp->DataAt(JSRegExp::kAtomPatternIndex));
@@ -3412,7 +3610,7 @@
   int replacement_len = replacement->length();
 
   FindStringIndicesDispatch(
-      isolate, *subject, pattern, &indices, 0xffffffff, zone);
+      isolate, *subject, pattern, &indices, 0xffffffff, &zone);
 
   int matches = indices.length();
   if (matches == 0) return *subject;
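
Replacing the shared runtime zone plus ZoneScope with a stack-allocated Zone, as in the hunks above and below, turns zone memory into an ordinary RAII arena: everything allocated from it is released when the zone goes out of scope, with no DELETE_ON_EXIT bookkeeping. A toy arena with the same shape:

  #include <cstddef>
  #include <cstdio>
  #include <cstdlib>
  #include <vector>

  // Toy bump-style arena: allocations are freed all at once when the
  // arena goes out of scope, like a stack-allocated v8::internal::Zone.
  class Zone {
   public:
    ~Zone() {
      for (size_t i = 0; i < blocks_.size(); i++) std::free(blocks_[i]);
    }
    void* New(size_t size) {
      void* p = std::malloc(size);
      blocks_.push_back(p);
      return p;
    }
   private:
    std::vector<void*> blocks_;
  };

  int main() {
    {
      Zone zone;  // was: ZoneScope over a shared per-isolate zone
      int* indices = static_cast<int*>(zone.New(8 * sizeof(int)));
      indices[0] = 42;
      std::printf("%d\n", indices[0]);
    }  // all zone memory released here
  }
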
@@ -3488,9 +3686,8 @@
   int subject_length = subject->length();
 
   // CompiledReplacement uses zone allocation.
-  Zone* zone = isolate->runtime_zone();
-  ZoneScope zonescope(zone, DELETE_ON_EXIT);
-  CompiledReplacement compiled_replacement(zone);
+  Zone zone(isolate);
+  CompiledReplacement compiled_replacement(&zone);
   bool simple_replace = compiled_replacement.Compile(replacement,
                                                      capture_count,
                                                      subject_length);
@@ -4023,15 +4220,14 @@
 
   int capture_count = regexp->CaptureCount();
 
-  Zone* zone = isolate->runtime_zone();
-  ZoneScope zone_space(zone, DELETE_ON_EXIT);
-  ZoneList<int> offsets(8, zone);
+  Zone zone(isolate);
+  ZoneList<int> offsets(8, &zone);
 
   while (true) {
     int32_t* match = global_cache.FetchNext();
     if (match == NULL) break;
-    offsets.Add(match[0], zone);  // start
-    offsets.Add(match[1], zone);  // end
+    offsets.Add(match[0], &zone);  // start
+    offsets.Add(match[1], &zone);  // end
   }
 
   if (global_cache.HasException()) return Failure::Exception();
@@ -6116,18 +6312,18 @@
 
   static const int kMaxInitialListCapacity = 16;
 
-  Zone* zone = isolate->runtime_zone();
-  ZoneScope scope(zone, DELETE_ON_EXIT);
+  Zone zone(isolate);
 
   // Find (up to limit) indices of separator and end-of-string in subject
   int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
-  ZoneList<int> indices(initial_capacity, zone);
+  ZoneList<int> indices(initial_capacity, &zone);
   if (!pattern->IsFlat()) FlattenString(pattern);
 
-  FindStringIndicesDispatch(isolate, *subject, *pattern, &indices, limit, zone);
+  FindStringIndicesDispatch(isolate, *subject, *pattern,
+                            &indices, limit, &zone);
 
   if (static_cast<uint32_t>(indices.length()) < limit) {
-    indices.Add(subject_length, zone);
+    indices.Add(subject_length, &zone);
   }
 
   // The list indices now contains the end of each part to create.
@@ -7930,12 +8126,7 @@
   ASSERT(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   ASSERT(V8::UseCrankshaft() && FLAG_parallel_recompilation);
-  OptimizingCompilerThread* opt_thread = isolate->optimizing_compiler_thread();
-  do {
-    // The function could have been marked for installing, but not queued just
-    // yet.  In this case, retry until installed.
-    opt_thread->InstallOptimizedFunctions();
-  } while (function->IsMarkedForInstallingRecompiledCode());
+  isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
   return function->code();
 }
 
@@ -9124,14 +9315,13 @@
   ASSERT_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
 
-  Zone* zone = isolate->runtime_zone();
   source = Handle<String>(source->TryFlattenGetString());
   // Optimized fast case where we only have ASCII characters.
   Handle<Object> result;
   if (source->IsSeqOneByteString()) {
-    result = JsonParser<true>::Parse(source, zone);
+    result = JsonParser<true>::Parse(source);
   } else {
-    result = JsonParser<false>::Parse(source, zone);
+    result = JsonParser<false>::Parse(source);
   }
   if (result.is_null()) {
     // Syntax error or stack overflow in scanner.
@@ -11584,6 +11774,58 @@
 }
 
 
+// Returns the list of step-in positions (text offsets) in the function of
+// the given stack frame, in the range from the current debug break position
+// to the end of the corresponding statement.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetStepInPositions) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 2);
+
+  // Check arguments.
+  Object* check;
+  { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+      RUNTIME_ARGUMENTS(isolate, args));
+    if (!maybe_check->ToObject(&check)) return maybe_check;
+  }
+  CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
+
+  // Get the frame where the debugging is performed.
+  StackFrame::Id id = UnwrapFrameId(wrapped_id);
+  JavaScriptFrameIterator frame_it(isolate, id);
+  JavaScriptFrame* frame = frame_it.frame();
+
+  Handle<SharedFunctionInfo> shared =
+      Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
+  Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared);
+
+  int len = 0;
+  Handle<JSArray> array(isolate->factory()->NewJSArray(10));
+  // Find the break point where execution has stopped.
+  BreakLocationIterator break_location_iterator(debug_info,
+                                                ALL_BREAK_LOCATIONS);
+
+  break_location_iterator.FindBreakLocationFromAddress(frame->pc());
+  int current_statement_pos = break_location_iterator.statement_position();
+
+  while (!break_location_iterator.Done()) {
+    if (break_location_iterator.IsStepInLocation(isolate)) {
+      Smi* position_value = Smi::FromInt(break_location_iterator.position());
+      JSObject::SetElement(array, len,
+          Handle<Object>(position_value, isolate),
+          NONE, kNonStrictMode);
+      len++;
+    }
+    // Advance iterator.
+    break_location_iterator.Next();
+    if (current_statement_pos !=
+        break_location_iterator.statement_position()) {
+      break;
+    }
+  }
+  return *array;
+}
+
+
 static const int kScopeDetailsTypeIndex = 0;
 static const int kScopeDetailsObjectIndex = 1;
 static const int kScopeDetailsSize = 2;
@@ -12178,6 +12420,15 @@
 // the same view of the values of parameters and local variables as if the
 // piece of JavaScript was evaluated at the point where the function on the
 // stack frame is currently stopped when we compile and run the (direct) eval.
+// Returns an array of:
+// #0: the evaluation result
+// #1: the local variables materialized again as an object after evaluation,
+//     containing the original variable values as they remained on the stack
+// #2: the local variables materialized as an object before evaluation (and
+//     possibly modified by the evaluated expression)
+// Since the user expression only reaches (and modifies) copies of the local
+// variables, those copies are returned to the caller to allow tracking the
+// changes and manually updating the actual variables.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
   HandleScope scope(isolate);
 
@@ -12277,7 +12528,23 @@
   }
 
   Handle<Object> receiver(frame->receiver(), isolate);
-  return DebugEvaluate(isolate, context, context_extension, receiver, source);
+  Object* evaluate_result_object;
+  { MaybeObject* maybe_result =
+    DebugEvaluate(isolate, context, context_extension, receiver, source);
+    if (!maybe_result->ToObject(&evaluate_result_object)) return maybe_result;
+  }
+  Handle<Object> evaluate_result(evaluate_result_object, isolate);
+
+  Handle<JSObject> local_scope_control_copy =
+      MaterializeLocalScopeWithFrameInspector(isolate, frame,
+                                              &frame_inspector);
+
+  Handle<FixedArray> result_array = isolate->factory()->NewFixedArray(3);
+  result_array->set(0, *evaluate_result);
+  result_array->set(1, *local_scope_control_copy);
+  result_array->set(2, *local_scope);
+
+  return *(isolate->factory()->NewJSArrayWithElements(result_array));
 }
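
For illustration, a hypothetical debugger-side consumer would unpack the
triple returned above like this (names are illustrative; the consuming debug
scripts are not part of this diff):

  // result_triple is the JSArray returned by Runtime_DebugEvaluate above.
  var evaluate_result = result_triple[0];  // #0: the evaluation result
  var locals_on_stack = result_triple[1];  // #1: original values still on the stack
  var locals_modified = result_triple[2];  // #2: copy possibly modified by the eval
  // Diffing locals_modified against locals_on_stack shows which variables the
  // expression changed, so the debugger can write those changes back manually.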
 
 
@@ -12851,8 +13118,7 @@
   CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
   CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 1);
 
-  return *LiveEdit::CheckAndDropActivations(shared_array, do_drop,
-                                            isolate->runtime_zone());
+  return *LiveEdit::CheckAndDropActivations(shared_array, do_drop);
 }
 
 // Compares 2 strings line-by-line, then token-wise and returns diff in form
@@ -12900,8 +13166,7 @@
   }
   if (it.done()) return heap->undefined_value();
 
-  const char* error_message =
-      LiveEdit::RestartFrame(it.frame(), isolate->runtime_zone());
+  const char* error_message = LiveEdit::RestartFrame(it.frame());
   if (error_message) {
     return *(isolate->factory()->InternalizeUtf8String(error_message));
   }
@@ -13111,6 +13376,7 @@
   CONVERT_ARG_CHECKED(JSObject, error_object, 0);
   String* key = isolate->heap()->hidden_stack_trace_string();
   Object* result = error_object->GetHiddenProperty(key);
+  if (result->IsTheHole()) result = isolate->heap()->undefined_value();
   RUNTIME_ASSERT(result->IsJSArray() ||
                  result->IsString() ||
                  result->IsUndefined());
diff --git a/src/runtime.h b/src/runtime.h
index d2daddf..4928e78 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -369,6 +369,28 @@
   F(TypedArrayGetLength, 1, 1) \
   F(TypedArraySetFastCases, 3, 1) \
   \
+  F(DataViewInitialize, 4, 1) \
+  F(DataViewGetBuffer, 1, 1) \
+  F(DataViewGetByteLength, 1, 1) \
+  F(DataViewGetByteOffset, 1, 1) \
+  F(DataViewGetInt8, 3, 1) \
+  F(DataViewGetUint8, 3, 1) \
+  F(DataViewGetInt16, 3, 1) \
+  F(DataViewGetUint16, 3, 1) \
+  F(DataViewGetInt32, 3, 1) \
+  F(DataViewGetUint32, 3, 1) \
+  F(DataViewGetFloat32, 3, 1) \
+  F(DataViewGetFloat64, 3, 1) \
+  \
+  F(DataViewSetInt8, 4, 1) \
+  F(DataViewSetUint8, 4, 1) \
+  F(DataViewSetInt16, 4, 1) \
+  F(DataViewSetUint16, 4, 1) \
+  F(DataViewSetInt32, 4, 1) \
+  F(DataViewSetUint32, 4, 1) \
+  F(DataViewSetFloat32, 4, 1) \
+  F(DataViewSetFloat64, 4, 1) \
+  \
   /* Statements */ \
   F(NewClosure, 3, 1) \
   F(NewObject, 1, 1) \
@@ -468,6 +490,7 @@
   F(GetFrameCount, 1, 1) \
   F(GetFrameDetails, 2, 1) \
   F(GetScopeCount, 2, 1) \
+  F(GetStepInPositions, 2, 1) \
   F(GetScopeDetails, 4, 1) \
   F(GetFunctionScopeCount, 1, 1) \
   F(GetFunctionScopeDetails, 2, 1) \
@@ -769,6 +792,10 @@
       Handle<JSArrayBuffer> array_buffer,
       size_t allocated_length);
 
+  static void FreeArrayBuffer(
+      Isolate* isolate,
+      JSArrayBuffer* phantom_array_buffer);
+
   // Helper functions used by stubs.
   static void PerformGC(Object* result);
 
diff --git a/src/runtime.js b/src/runtime.js
index 22f888d..348fd74 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -658,7 +658,6 @@
   throw %MakeTypeError('cannot_convert_to_primitive', []);
 }
 
-
 // ECMA-262, section 8.6.2.6, page 28.
 function DefaultString(x) {
   var toString = x.toString;
@@ -676,6 +675,12 @@
   throw %MakeTypeError('cannot_convert_to_primitive', []);
 }
 
+function ToPositiveInteger(x, rangeErrorName) {
+  var i = TO_INTEGER(x);
+  if (i < 0) throw %MakeRangeError(rangeErrorName);
+  return i;
+}
+
 
 // NOTE: Setting the prototype for Array must take place as early as
 // possible due to code generation for array literals.  When
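
For reference, a standalone sketch of the new ToPositiveInteger helper,
assuming TO_INTEGER follows ECMAScript ToInteger semantics (NaN becomes 0,
truncation toward zero) and approximating %MakeRangeError with a plain
RangeError:

  function ToPositiveIntegerSketch(x, rangeErrorName) {
    var n = Number(x);
    var i = isNaN(n) ? 0 : (n < 0 ? Math.ceil(n) : Math.floor(n));  // ToInteger
    if (i < 0) throw new RangeError(rangeErrorName);
    return i;
  }

  ToPositiveIntegerSketch(undefined, "invalid_typed_array_length");  // 0
  ToPositiveIntegerSketch(3.7, "invalid_typed_array_length");        // 3
  ToPositiveIntegerSketch(-1, "invalid_typed_array_length");         // throws

The typed-array and DataView code below relies on this helper to validate
lengths and offsets.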
diff --git a/src/sampler.cc b/src/sampler.cc
index 96b20f0..6d97110 100644
--- a/src/sampler.cc
+++ b/src/sampler.cc
@@ -636,7 +636,7 @@
     has_external_callback = false;
   }
 
-  SafeStackTraceFrameIterator it(isolate, fp, sp, sp, js_entry_sp);
+  SafeStackFrameIterator it(isolate, fp, sp, sp, js_entry_sp);
   int i = 0;
   while (!it.done() && i < TickSample::kMaxFramesCount) {
     stack[i++] = it.frame()->pc();
diff --git a/src/serialize.h b/src/serialize.h
index e5664ff..a6099af 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -613,7 +613,7 @@
     // unique ID, and deserializing several partial snapshots containing script
     // would cause dupes.
     ASSERT(!o->IsScript());
-    return o->IsString() || o->IsSharedFunctionInfo() ||
+    return o->IsName() || o->IsSharedFunctionInfo() ||
            o->IsHeapNumber() || o->IsCode() ||
            o->IsScopeInfo() ||
            o->map() == HEAP->fixed_cow_array_map();
diff --git a/src/spaces.cc b/src/spaces.cc
index 825d2a6..15381ea 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1790,49 +1790,20 @@
 }
 
 
-static void ClearCodeKindStatistics() {
-  Isolate* isolate = Isolate::Current();
+static void ClearCodeKindStatistics(int* code_kind_statistics) {
   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
-    isolate->code_kind_statistics()[i] = 0;
+    code_kind_statistics[i] = 0;
   }
 }
 
 
-static void ReportCodeKindStatistics() {
-  Isolate* isolate = Isolate::Current();
-  const char* table[Code::NUMBER_OF_KINDS] = { NULL };
-
-#define CASE(name)                            \
-  case Code::name: table[Code::name] = #name; \
-  break
-
-  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
-    switch (static_cast<Code::Kind>(i)) {
-      CASE(FUNCTION);
-      CASE(OPTIMIZED_FUNCTION);
-      CASE(STUB);
-      CASE(BUILTIN);
-      CASE(LOAD_IC);
-      CASE(KEYED_LOAD_IC);
-      CASE(STORE_IC);
-      CASE(KEYED_STORE_IC);
-      CASE(CALL_IC);
-      CASE(KEYED_CALL_IC);
-      CASE(UNARY_OP_IC);
-      CASE(BINARY_OP_IC);
-      CASE(COMPARE_IC);
-      CASE(COMPARE_NIL_IC);
-      CASE(TO_BOOLEAN_IC);
-    }
-  }
-
-#undef CASE
-
+static void ReportCodeKindStatistics(int* code_kind_statistics) {
   PrintF("\n   Code kind histograms: \n");
   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
-    if (isolate->code_kind_statistics()[i] > 0) {
-      PrintF("     %-20s: %10d bytes\n", table[i],
-          isolate->code_kind_statistics()[i]);
+    if (code_kind_statistics[i] > 0) {
+      PrintF("     %-20s: %10d bytes\n",
+             Code::Kind2String(static_cast<Code::Kind>(i)),
+             code_kind_statistics[i]);
     }
   }
   PrintF("\n");
@@ -1840,7 +1811,7 @@
 
 
 static int CollectHistogramInfo(HeapObject* obj) {
-  Isolate* isolate = Isolate::Current();
+  Isolate* isolate = obj->GetIsolate();
   InstanceType type = obj->map()->instance_type();
   ASSERT(0 <= type && type <= LAST_TYPE);
   ASSERT(isolate->heap_histograms()[type].name() != NULL);
@@ -2714,7 +2685,7 @@
   Isolate* isolate = Isolate::Current();
   CommentStatistic* comments_statistics =
       isolate->paged_space_comments_statistics();
-  ReportCodeKindStatistics();
+  ReportCodeKindStatistics(isolate->code_kind_statistics());
   PrintF("Code comment statistics (\"   [ comment-txt   :    size/   "
          "count  (average)\"):\n");
   for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
@@ -2732,7 +2703,7 @@
   Isolate* isolate = Isolate::Current();
   CommentStatistic* comments_statistics =
       isolate->paged_space_comments_statistics();
-  ClearCodeKindStatistics();
+  ClearCodeKindStatistics(isolate->code_kind_statistics());
   for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
     comments_statistics[i].Clear();
   }
diff --git a/src/store-buffer.cc b/src/store-buffer.cc
index fd6366a..0386280 100644
--- a/src/store-buffer.cc
+++ b/src/store-buffer.cc
@@ -121,6 +121,7 @@
 
 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
   isolate->heap()->store_buffer()->Compact();
+  isolate->counters()->store_buffer_overflows()->Increment();
 }
 
 
diff --git a/src/string-stream.cc b/src/string-stream.cc
index 1096225..9c4394e 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -471,7 +471,7 @@
   }
 
   JSFunction* fun = JSFunction::cast(f);
-  Object* perhaps_context = fun->unchecked_context();
+  Object* perhaps_context = fun->context();
   if (perhaps_context->IsHeapObject() &&
       heap->Contains(HeapObject::cast(perhaps_context)) &&
       perhaps_context->IsContext()) {
diff --git a/src/string.js b/src/string.js
index 44315bb..7e18687 100644
--- a/src/string.js
+++ b/src/string.js
@@ -495,8 +495,7 @@
       }
     }
   }
-  var resultBuilder = new ReplaceResultBuilder(subject, res);
-  var result = resultBuilder.generate();
+  var result = %StringBuilderConcat(res, res.length, subject);
   resultArray.length = 0;
   reusableReplaceArray = resultArray;
   return result;
@@ -645,6 +644,8 @@
 }
 
 
+var ArrayPushBuiltin = $Array.prototype.push;
+
 function StringSplitOnRegExp(subject, separator, limit, length) {
   %_Log('regexp', 'regexp-split,%0S,%1r', [subject, separator]);
 
@@ -664,13 +665,15 @@
   while (true) {
 
     if (startIndex === length) {
-      result.push(%_SubString(subject, currentIndex, length));
+      %_CallFunction(result, %_SubString(subject, currentIndex, length),
+                     ArrayPushBuiltin);
       break;
     }
 
     var matchInfo = DoRegExpExec(separator, subject, startIndex);
     if (matchInfo == null || length === (startMatch = matchInfo[CAPTURE0])) {
-      result.push(%_SubString(subject, currentIndex, length));
+      %_CallFunction(result, %_SubString(subject, currentIndex, length),
+                     ArrayPushBuiltin);
       break;
     }
     var endIndex = matchInfo[CAPTURE1];
@@ -681,7 +684,8 @@
       continue;
     }
 
-    result.push(%_SubString(subject, currentIndex, startMatch));
+    %_CallFunction(result, %_SubString(subject, currentIndex, startMatch),
+                   ArrayPushBuiltin);
 
     if (result.length === limit) break;
 
@@ -690,9 +694,10 @@
       var start = matchInfo[i++];
       var end = matchInfo[i++];
       if (end != -1) {
-        result.push(%_SubString(subject, start, end));
+        %_CallFunction(result, %_SubString(subject, start, end),
+                       ArrayPushBuiltin);
       } else {
-        result.push(void 0);
+        %_CallFunction(result, void 0, ArrayPushBuiltin);
       }
       if (result.length === limit) break outer_loop;
     }
@@ -950,43 +955,6 @@
   return "<sup>" + this + "</sup>";
 }
 
-
-// ReplaceResultBuilder support.
-function ReplaceResultBuilder(str) {
-  if (%_ArgumentsLength() > 1) {
-    this.elements = %_Arguments(1);
-  } else {
-    this.elements = new InternalArray();
-  }
-  this.special_string = str;
-}
-
-SetUpLockedPrototype(ReplaceResultBuilder,
-  $Array("elements", "special_string"), $Array(
-  "add", function(str) {
-    str = TO_STRING_INLINE(str);
-    if (str.length > 0) this.elements.push(str);
-  },
-  "addSpecialSlice", function(start, end) {
-    var len = end - start;
-    if (start < 0 || len <= 0) return;
-    if (start < 0x80000 && len < 0x800) {
-      this.elements.push((start << 11) | len);
-    } else {
-      // 0 < len <= String::kMaxLength and Smi::kMaxValue >= String::kMaxLength,
-      // so -len is a smi.
-      var elements = this.elements;
-      elements.push(-len);
-      elements.push(start);
-    }
-  },
-  "generate", function() {
-    var elements = this.elements;
-    return %StringBuilderConcat(elements, elements.length, this.special_string);
-  }
-));
-
-
 // -------------------------------------------------------------------
 
 function SetUpString() {
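
The split and replace paths above now invoke the original Array.prototype.push
through %_CallFunction with a builtin captured at setup time, so user
overrides of push cannot corrupt the result arrays. A minimal sketch of the
pattern in plain JavaScript, using Function.prototype.call in place of the
%_CallFunction intrinsic:

  var ArrayPushBuiltin = Array.prototype.push;  // captured before user code runs

  function safePush(arr, value) {
    // Equivalent of %_CallFunction(arr, value, ArrayPushBuiltin): invoke the
    // captured builtin with 'arr' as the receiver, ignoring later overrides.
    ArrayPushBuiltin.call(arr, value);
  }

  Array.prototype.push = function () { throw new Error("hijacked"); };
  var parts = [];
  safePush(parts, "a");  // still works: parts is ["a"]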
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 2a27f4f..e0839f1 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -43,7 +43,7 @@
 // StubCache implementation.
 
 
-StubCache::StubCache(Isolate* isolate, Zone* zone)
+StubCache::StubCache(Isolate* isolate)
     : isolate_(isolate) {
   ASSERT(isolate == Isolate::Current());
 }
@@ -644,7 +644,10 @@
   PROFILE(isolate_,
           CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
   GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(stub_holder, name, code);
+
+  if (CallStubCompiler::CanBeCached(function)) {
+    JSObject::UpdateMapCodeCache(stub_holder, name, code);
+  }
   return code;
 }
 
@@ -753,7 +756,9 @@
   PROFILE(isolate(),
           CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
   GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(stub_holder, name, code);
+  if (CallStubCompiler::CanBeCached(function)) {
+    JSObject::UpdateMapCodeCache(stub_holder, name, code);
+  }
   return code;
 }
 
@@ -1108,12 +1113,7 @@
   LOG(isolate, ApiNamedPropertyAccess("store", recv, *name));
   PropertyCallbackArguments
       custom_args(isolate, callback->data(), recv, recv);
-  {
-    // Leaving JavaScript.
-    VMState<EXTERNAL> state(isolate);
-    ExternalCallbackScope call_scope(isolate, setter_address);
-    custom_args.Call(fun, v8::Utils::ToLocal(str), v8::Utils::ToLocal(value));
-  }
+  custom_args.Call(fun, v8::Utils::ToLocal(str), v8::Utils::ToLocal(value));
   RETURN_IF_SCHEDULED_EXCEPTION(isolate);
   return *value;
 }
@@ -1159,12 +1159,8 @@
   {
     // Use the interceptor getter.
     HandleScope scope(isolate);
-    v8::Handle<v8::Value> r;
-    {
-      // Leaving JavaScript.
-      VMState<EXTERNAL> state(isolate);
-      r = callback_args.Call(getter, v8::Utils::ToLocal(name));
-    }
+    v8::Handle<v8::Value> r =
+        callback_args.Call(getter, v8::Utils::ToLocal(name));
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     if (!r.IsEmpty()) {
       Handle<Object> result = v8::Utils::OpenHandle(*r);
@@ -1229,12 +1225,8 @@
   {
     // Use the interceptor getter.
     HandleScope scope(isolate);
-    v8::Handle<v8::Value> r;
-    {
-      // Leaving JavaScript.
-      VMState<EXTERNAL> state(isolate);
-      r = callback_args.Call(getter, v8::Utils::ToLocal(name));
-    }
+    v8::Handle<v8::Value> r =
+        callback_args.Call(getter, v8::Utils::ToLocal(name));
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     if (!r.IsEmpty()) {
       *attrs = NONE;
@@ -1974,12 +1966,25 @@
 }
 
 
+bool CallStubCompiler::CanBeCached(Handle<JSFunction> function) {
+  if (function->shared()->HasBuiltinFunctionId()) {
+    BuiltinFunctionId id = function->shared()->builtin_function_id();
+#define CALL_GENERATOR_CASE(name) if (id == k##name) return false;
+    SITE_SPECIFIC_CALL_GENERATORS(CALL_GENERATOR_CASE)
+#undef CALL_GENERATOR_CASE
+  }
+
+  return true;
+}
+
+
 Handle<Code> CallStubCompiler::CompileCustomCall(
     Handle<Object> object,
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> fname) {
+    Handle<String> fname,
+    Code::StubType type) {
   ASSERT(HasCustomCallGenerator(function));
 
   if (function->shared()->HasBuiltinFunctionId()) {
@@ -1990,7 +1995,8 @@
                                                    holder,      \
                                                    cell,        \
                                                    function,    \
-                                                   fname);      \
+                                                   fname,       \
+                                                   type);       \
     }
     CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
 #undef CALL_GENERATOR_CASE
diff --git a/src/stub-cache.h b/src/stub-cache.h
index c085b51..6d70d34 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -367,7 +367,7 @@
   Factory* factory() { return isolate()->factory(); }
 
  private:
-  StubCache(Isolate* isolate, Zone* zone);
+  explicit StubCache(Isolate* isolate);
 
   Handle<Code> ComputeCallInitialize(int argc,
                                      RelocInfo::Mode mode,
@@ -969,7 +969,12 @@
   V(StringCharAt)                               \
   V(StringFromCharCode)                         \
   V(MathFloor)                                  \
-  V(MathAbs)
+  V(MathAbs)                                    \
+  V(ArrayCode)
+
+
+#define SITE_SPECIFIC_CALL_GENERATORS(V)        \
+  V(ArrayCode)
 
 
 class CallOptimization;
@@ -1012,6 +1017,7 @@
                                  Handle<Name> name);
 
   static bool HasCustomCallGenerator(Handle<JSFunction> function);
+  static bool CanBeCached(Handle<JSFunction> function);
 
  private:
   // Compiles a custom call constant/global IC.  For constant calls cell is
@@ -1021,14 +1027,16 @@
                                  Handle<JSObject> holder,
                                  Handle<Cell> cell,
                                  Handle<JSFunction> function,
-                                 Handle<String> name);
+                                 Handle<String> name,
+                                 Code::StubType type);
 
 #define DECLARE_CALL_GENERATOR(name)                                    \
   Handle<Code> Compile##name##Call(Handle<Object> object,               \
                                    Handle<JSObject> holder,             \
                                    Handle<Cell> cell,                   \
                                    Handle<JSFunction> function,         \
-                                   Handle<String> fname);
+                                   Handle<String> fname,                \
+                                   Code::StubType type);
   CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
 #undef DECLARE_CALL_GENERATOR
 
diff --git a/src/third_party/vtune/vtune-jit.cc b/src/third_party/vtune/vtune-jit.cc
index d3f7a68..0f35290 100644
--- a/src/third_party/vtune/vtune-jit.cc
+++ b/src/third_party/vtune/vtune-jit.cc
@@ -192,8 +192,7 @@
         if (*script != NULL) {
           // Get the source file name and set it to jmethod.source_file_name
          if ((*script->GetScriptName())->IsString()) {
-            Handle<String> script_name =
-                Handle<String>(String::Cast(*script->GetScriptName()));
+            Handle<String> script_name = script->GetScriptName()->ToString();
             temp_file_name = new char[script_name->Utf8Length() + 1];
             script_name->WriteUtf8(temp_file_name);
             jmethod.source_file_name = temp_file_name;
diff --git a/src/type-info.cc b/src/type-info.cc
index dee2a77..ae5bf97 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -138,6 +138,15 @@
 }
 
 
+bool TypeFeedbackOracle::StoreIsUninitialized(TypeFeedbackId ast_id) {
+  Handle<Object> map_or_code = GetInfo(ast_id);
+  if (map_or_code->IsMap()) return false;
+  if (!map_or_code->IsCode()) return true;
+  Handle<Code> code = Handle<Code>::cast(map_or_code);
+  return code->ic_state() == UNINITIALIZED;
+}
+
+
 bool TypeFeedbackOracle::StoreIsMonomorphicNormal(TypeFeedbackId ast_id) {
   Handle<Object> map_or_code = GetInfo(ast_id);
   if (map_or_code->IsMap()) return true;
@@ -247,7 +256,9 @@
 void TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
                                            Handle<String> name,
                                            SmallMapList* types) {
-  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC);
+  Code::Flags flags = Code::ComputeFlags(
+      Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
+      Code::NORMAL, Code::LOAD_IC);
   CollectReceiverTypes(expr->PropertyFeedbackId(), name, flags, types);
 }
 
@@ -341,13 +352,11 @@
 }
 
 
-void TypeFeedbackOracle::CompareTypes(TypeFeedbackId id,
-                                      Handle<Type>* left_type,
-                                      Handle<Type>* right_type,
-                                      Handle<Type>* overall_type,
-                                      Handle<Type>* compare_nil_type) {
-  *left_type = *right_type = *overall_type = *compare_nil_type =
-      handle(Type::Any(), isolate_);
+void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
+                                     Handle<Type>* left_type,
+                                     Handle<Type>* right_type,
+                                     Handle<Type>* combined_type) {
+  *left_type = *right_type = *combined_type = handle(Type::Any(), isolate_);
   Handle<Object> info = GetInfo(id);
   if (!info->IsCode()) return;
   Handle<Code> code = Handle<Code>::cast(info);
@@ -364,10 +373,14 @@
   if (code->is_compare_ic_stub()) {
     int stub_minor_key = code->stub_info();
     CompareIC::StubInfoToType(
-        stub_minor_key, left_type, right_type, overall_type, map, isolate());
+        stub_minor_key, left_type, right_type, combined_type, map, isolate());
   } else if (code->is_compare_nil_ic_stub()) {
     CompareNilICStub::State state(code->compare_nil_state());
-    *compare_nil_type = CompareNilICStub::StateToType(isolate_, state, map);
+    *combined_type = CompareNilICStub::StateToType(isolate_, state, map);
+    Handle<Type> nil_type = handle(code->compare_nil_value() == kNullValue
+        ? Type::Null() : Type::Undefined(), isolate_);
+    *left_type = *right_type =
+        handle(Type::Union(*combined_type, nil_type), isolate_);
   }
 }
 
@@ -386,8 +399,7 @@
                                     Handle<Type>* left,
                                     Handle<Type>* right,
                                     Handle<Type>* result,
-                                    bool* has_fixed_right_arg,
-                                    int* fixed_right_arg_value) {
+                                    Maybe<int>* fixed_right_arg) {
   Handle<Object> object = GetInfo(id);
   *left = *right = *result = handle(Type::Any(), isolate_);
   if (!object->IsCode()) return;
@@ -396,10 +408,8 @@
 
   int minor_key = code->stub_info();
   BinaryOpIC::StubInfoToType(minor_key, left, right, result, isolate());
-  *has_fixed_right_arg =
-      BinaryOpStub::decode_has_fixed_right_arg_from_minor_key(minor_key);
-  *fixed_right_arg_value =
-      BinaryOpStub::decode_fixed_right_arg_value_from_minor_key(minor_key);
+  *fixed_right_arg =
+      BinaryOpStub::decode_fixed_right_arg_from_minor_key(minor_key);
 }
 
 
diff --git a/src/type-info.h b/src/type-info.h
index 3c2b831..aa1f509 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -244,6 +244,7 @@
   bool LoadIsMonomorphicNormal(Property* expr);
   bool LoadIsUninitialized(Property* expr);
   bool LoadIsPolymorphic(Property* expr);
+  bool StoreIsUninitialized(TypeFeedbackId ast_id);
   bool StoreIsMonomorphicNormal(TypeFeedbackId ast_id);
   bool StoreIsPolymorphic(TypeFeedbackId ast_id);
   bool CallIsMonomorphic(Call* expr);
@@ -299,14 +300,12 @@
                   Handle<Type>* left,
                   Handle<Type>* right,
                   Handle<Type>* result,
-                  bool* has_fixed_right_arg,
-                  int* fixed_right_arg_value);
+                  Maybe<int>* fixed_right_arg);
 
-  void CompareTypes(TypeFeedbackId id,
-                    Handle<Type>* left_type,
-                    Handle<Type>* right_type,
-                    Handle<Type>* overall_type,
-                    Handle<Type>* compare_nil_type);
+  void CompareType(TypeFeedbackId id,
+                   Handle<Type>* left,
+                   Handle<Type>* right,
+                   Handle<Type>* combined);
 
   Handle<Type> ClauseType(TypeFeedbackId id);
 
diff --git a/src/typedarray.js b/src/typedarray.js
index 04c487f..0d90355 100644
--- a/src/typedarray.js
+++ b/src/typedarray.js
@@ -37,7 +37,7 @@
 
 function CreateTypedArrayConstructor(name, elementSize, arrayId, constructor) {
   function ConstructByArrayBuffer(obj, buffer, byteOffset, length) {
-    var offset = IS_UNDEFINED(byteOffset) ? 0 : TO_POSITIVE_INTEGER(byteOffset);
+    var offset = ToPositiveInteger(byteOffset, "invalid_typed_array_length");
 
     if (offset % elementSize !== 0) {
       throw MakeRangeError("invalid_typed_array_alignment",
@@ -58,7 +58,7 @@
       newByteLength = bufferByteLength - offset;
       newLength = newByteLength / elementSize;
     } else {
-      var newLength = TO_POSITIVE_INTEGER(length);
+      var newLength = ToPositiveInteger(length, "invalid_typed_array_length");
       newByteLength = newLength * elementSize;
     }
     if (offset + newByteLength > bufferByteLength) {
@@ -68,7 +68,7 @@
   }
 
   function ConstructByLength(obj, length) {
-    var l = IS_UNDEFINED(length) ? 0 : TO_POSITIVE_INTEGER(length);
+    var l = ToPositiveInteger(length, "invalid_typed_array_length");
     var byteLength = l * elementSize;
     var buffer = new global.ArrayBuffer(byteLength);
     %TypedArrayInitialize(obj, arrayId, buffer, 0, byteLength);
@@ -76,7 +76,7 @@
 
   function ConstructByArrayLike(obj, arrayLike) {
     var length = arrayLike.length;
-    var l =  IS_UNDEFINED(length) ? 0 : TO_POSITIVE_INTEGER(length);
+    var l =  ToPositiveInteger(length, "invalid_typed_array_length");
     var byteLength = l * elementSize;
     var buffer = new $ArrayBuffer(byteLength);
     %TypedArrayInitialize(obj, arrayId, buffer, 0, byteLength);
@@ -97,7 +97,7 @@
         throw MakeTypeError("parameterless_typed_array_constr", [name]);
       }
     } else {
-      return new constructor(arg1, arg2, arg3);
+      throw MakeTypeError("constructor_not_function", [name])
     }
   }
 }
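
Note the behavioral change above: a typed-array constructor called without
new previously forwarded to `new constructor(...)` and now throws. For
example:

  var a = new Uint8Array(4);  // still fine
  Uint8Array(4);              // now throws TypeError: constructor_not_function
  new Uint8Array(-1);         // throws RangeError: invalid_typed_array_length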
@@ -146,7 +146,10 @@
 }
 
 function TypedArraySet(obj, offset) {
-  var intOffset = IS_UNDEFINED(offset) ? 0 : TO_POSITIVE_INTEGER(offset);
+  var intOffset = IS_UNDEFINED(offset) ? 0 : TO_INTEGER(offset);
+  if (intOffset < 0) {
+    throw MakeTypeError("typed_array_set_negative_offset");
+  }
   if (%TypedArraySetFastCases(this, obj, intOffset))
     return;
 
@@ -197,3 +200,276 @@
 SetupTypedArray(7, "Float32Array", global.Float32Array, 4);
 SetupTypedArray(8, "Float64Array", global.Float64Array, 8);
 SetupTypedArray(9, "Uint8ClampedArray", global.Uint8ClampedArray, 1);
+
+
+// --------------------------- DataView -----------------------------
+
+var $DataView = global.DataView;
+
+function DataViewConstructor(buffer, byteOffset, byteLength) { // length = 3
+  if (%_IsConstructCall()) {
+    if (!IS_ARRAYBUFFER(buffer)) {
+      throw MakeTypeError('data_view_not_array_buffer', []);
+    }
+    var bufferByteLength = %ArrayBufferGetByteLength(buffer);
+    var offset = ToPositiveInteger(byteOffset, 'invalid_data_view_offset');
+    if (offset > bufferByteLength) {
+      throw MakeRangeError('invalid_data_view_offset');
+    }
+    var length = IS_UNDEFINED(byteLength) ?
+      bufferByteLength - offset : TO_INTEGER(byteLength);
+    if (length < 0 || offset + length > bufferByteLength) {
+      throw MakeRangeError('invalid_data_view_length');
+    }
+    %DataViewInitialize(this, buffer, offset, length);
+  } else {
+    throw MakeTypeError('constructor_not_function', ["DataView"]);
+  }
+}
+
+function DataViewGetBuffer() {
+  if (!IS_DATAVIEW(this)) {
+    throw MakeTypeError('incompatible_method_reciever',
+                        ['DataView.buffer', this]);
+  }
+  return %DataViewGetBuffer(this);
+}
+
+function DataViewGetByteOffset() {
+  if (!IS_DATAVIEW(this)) {
+    throw MakeTypeError('incompatible_method_reciever',
+                        ['DataView.byteOffset', this]);
+  }
+  return %DataViewGetByteOffset(this);
+}
+
+function DataViewGetByteLength() {
+  if (!IS_DATAVIEW(this)) {
+    throw MakeTypeError('incompatible_method_reciever',
+                        ['DataView.byteLength', this]);
+  }
+  return %DataViewGetByteLength(this);
+}
+
+function ToPositiveDataViewOffset(offset) {
+  return ToPositiveInteger(offset, 'invalid_data_view_accessor_offset');
+}
+
+function DataViewGetInt8(offset, little_endian) {
+  if (!IS_DATAVIEW(this)) {
+    throw MakeTypeError('incompatible_method_reciever',
+                        ['DataView.getInt8', this]);
+  }
+  return %DataViewGetInt8(this,
+                          ToPositiveDataViewOffset(offset),
+                          !!little_endian);
+}
+
+function DataViewSetInt8(offset, value, little_endian) {
+  if (!IS_DATAVIEW(this)) {
+    throw MakeTypeError('incompatible_method_reciever',
+                        ['DataView.setInt8', this]);
+  }
+  %DataViewSetInt8(this,
+                   ToPositiveDataViewOffset(offset),
+                   TO_NUMBER_INLINE(value),
+                   !!little_endian);
+}
+
+function DataViewGetUint8(offset, little_endian) {
+  if (!IS_DATAVIEW(this)) {
+    throw MakeTypeError('incompatible_method_reciever',
+                        ['DataView.getUint8', this]);
+  }
+  return %DataViewGetUint8(this,
+                           ToPositiveDataViewOffset(offset),
+                           !!little_endian);
+}
+
+function DataViewSetUint8(offset, value, little_endian) {
+  if (!IS_DATAVIEW(this)) {
+    throw MakeTypeError('incompatible_method_reciever',
+                        ['DataView.setUint8', this]);
+  }
+  %DataViewSetUint8(this,
+                    ToPositiveDataViewOffset(offset),
+                    TO_NUMBER_INLINE(value),
+                    !!little_endian);
+}
+
+function DataViewGetInt16(offset, little_endian) {
+  if (!IS_DATAVIEW(this)) {
+    throw MakeTypeError('incompatible_method_reciever',
+                        ['DataView.getInt16', this]);
+  }
+  return %DataViewGetInt16(this,
+                           ToPositiveDataViewOffset(offset),
+                           !!little_endian);
+}
+
+function DataViewSetInt16(offset, value, little_endian) {
+  if (!IS_DATAVIEW(this)) {
+    throw MakeTypeError('incompatible_method_reciever',
+                        ['DataView.setInt16', this]);
+  }
+  %DataViewSetInt16(this,
+                    ToPositiveDataViewOffset(offset),
+                    TO_NUMBER_INLINE(value),
+                    !!little_endian);
+}
+
+function DataViewGetUint16(offset, little_endian) {
+  if (!IS_DATAVIEW(this)) {
+    throw MakeTypeError('incompatible_method_reciever',
+                        ['DataView.getUint16', this]);
+  }
+  return %DataViewGetUint16(this,
+                            ToPositiveDataViewOffset(offset),
+                            !!little_endian);
+}
+
+function DataViewSetUint16(offset, value, little_endian) {
+  if (!IS_DATAVIEW(this)) {
+    throw MakeTypeError('incompatible_method_reciever',
+                        ['DataView.setUint16', this]);
+  }
+  %DataViewSetUint16(this,
+                     ToPositiveDataViewOffset(offset),
+                     TO_NUMBER_INLINE(value),
+                     !!little_endian);
+}
+
+function DataViewGetInt32(offset, little_endian) {
+  if (!IS_DATAVIEW(this)) {
+    throw MakeTypeError('incompatible_method_reciever',
+                        ['DataView.getInt32', this]);
+  }
+  return %DataViewGetInt32(this,
+                           ToPositiveDataViewOffset(offset),
+                           !!little_endian);
+}
+
+function DataViewSetInt32(offset, value, little_endian) {
+  if (!IS_DATAVIEW(this)) {
+    throw MakeTypeError('incompatible_method_reciever',
+                        ['DataView.setInt32', this]);
+  }
+  %DataViewSetInt32(this,
+                    ToPositiveDataViewOffset(offset),
+                    TO_NUMBER_INLINE(value),
+                    !!little_endian);
+}
+
+function DataViewGetUint32(offset, little_endian) {
+  if (!IS_DATAVIEW(this)) {
+    throw MakeTypeError('incompatible_method_reciever',
+                        ['DataView.getUint32', this]);
+  }
+  return %DataViewGetUint32(this,
+                            ToPositiveDataViewOffset(offset),
+                            !!little_endian);
+}
+
+function DataViewSetUint32(offset, value, little_endian) {
+  if (!IS_DATAVIEW(this)) {
+    throw MakeTypeError('incompatible_method_reciever',
+                        ['DataView.setUint32', this]);
+  }
+  %DataViewSetUint32(this,
+                     ToPositiveDataViewOffset(offset),
+                     TO_NUMBER_INLINE(value),
+                     !!little_endian);
+}
+
+function DataViewGetFloat32(offset, little_endian) {
+  if (!IS_DATAVIEW(this)) {
+    throw MakeTypeError('incompatible_method_reciever',
+                        ['DataView.getFloat32', this]);
+  }
+  return %DataViewGetFloat32(this,
+                             ToPositiveDataViewOffset(offset),
+                             !!little_endian);
+}
+
+function DataViewSetFloat32(offset, value, little_endian) {
+  if (!IS_DATAVIEW(this)) {
+    throw MakeTypeError('incompatible_method_reciever',
+                        ['DataView.setFloat32', this]);
+  }
+  %DataViewSetFloat32(this,
+                      ToPositiveDataViewOffset(offset),
+                      TO_NUMBER_INLINE(value),
+                      !!little_endian);
+}
+
+function DataViewGetFloat64(offset, little_endian) {
+  if (!IS_DATAVIEW(this)) {
+    throw MakeTypeError('incompatible_method_reciever',
+                        ['DataView.getFloat64', this]);
+  }
+  return %DataViewGetFloat64(this,
+                             ToPositiveDataViewOffset(offset),
+                             !!little_endian);
+}
+
+function DataViewSetFloat64(offset, value, little_endian) {
+  if (!IS_DATAVIEW(this)) {
+    throw MakeTypeError('incompatible_method_reciever',
+                        ['DataView.setFloat64', this]);
+  }
+  %DataViewSetFloat64(this,
+                      ToPositiveDataViewOffset(offset),
+                      TO_NUMBER_INLINE(value),
+                      !!little_endian);
+}
+
+function SetupDataView() {
+  %CheckIsBootstrapping();
+
+  // Set up the DataView constructor.
+  %SetCode($DataView, DataViewConstructor);
+  %FunctionSetPrototype($DataView, new $Object);
+
+  // Set up constructor property on the DataView prototype.
+  %SetProperty($DataView.prototype, "constructor", $DataView, DONT_ENUM);
+
+  InstallGetter($DataView.prototype, "buffer", DataViewGetBuffer);
+  InstallGetter($DataView.prototype, "byteOffset", DataViewGetByteOffset);
+  InstallGetter($DataView.prototype, "byteLength", DataViewGetByteLength);
+
+  InstallFunctions($DataView.prototype, DONT_ENUM, $Array(
+      "getInt8", DataViewGetInt8,
+      "setInt8", DataViewSetInt8,
+
+      "getUint8", DataViewGetUint8,
+      "setUint8", DataViewSetUint8,
+
+      "getInt16", DataViewGetInt16,
+      "setInt16", DataViewSetInt16,
+
+      "getUint16", DataViewGetUint16,
+      "setUint16", DataViewSetUint16,
+
+      "getInt32", DataViewGetInt32,
+      "setInt32", DataViewSetInt32,
+
+      "getUint32", DataViewGetUint32,
+      "setUint32", DataViewSetUint32,
+
+      "getFloat32", DataViewGetFloat32,
+      "setFloat32", DataViewSetFloat32,
+
+      "getFloat64", DataViewGetFloat64,
+      "setFloat64", DataViewSetFloat64
+  ));
+}
+
+SetupDataView();
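
With SetupDataView in place, the new API is exercised from JavaScript as
follows (standard DataView behavior; offsets are validated through
ToPositiveDataViewOffset):

  var buffer = new ArrayBuffer(8);
  var view = new DataView(buffer);   // offset 0, spans the whole buffer

  view.setUint16(0, 0xCAFE, true);   // little-endian write: bytes FE CA
  view.getUint16(0, true);           // 0xCAFE
  view.getUint16(0, false);          // 0xFECA: big-endian read of the same bytes

  // view.getInt8(-1);               // throws RangeError (negative offset)
  // new DataView(buffer, 16);       // throws RangeError (offset > byte length)
  // DataView(buffer);               // throws TypeError (must be a construct call)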
diff --git a/src/types.cc b/src/types.cc
index 20e8a4c..1275dea 100644
--- a/src/types.cc
+++ b/src/types.cc
@@ -125,12 +125,13 @@
       map = *this->as_class();
     } else {
       Handle<v8::internal::Object> value = this->as_constant();
-      if (value->IsSmi()) return kInteger31;
+      if (value->IsSmi()) return kSmi;
       map = HeapObject::cast(*value)->map();
       if (map->instance_type() == ODDBALL_TYPE) {
         if (value->IsUndefined()) return kUndefined;
         if (value->IsNull()) return kNull;
         if (value->IsTrue() || value->IsFalse()) return kBoolean;
+        if (value->IsTheHole()) return kAny;
       }
     }
     switch (map->instance_type()) {
@@ -174,14 +175,18 @@
       case JS_GLOBAL_PROXY_TYPE:
       case JS_ARRAY_BUFFER_TYPE:
       case JS_TYPED_ARRAY_TYPE:
+      case JS_DATA_VIEW_TYPE:
+      case JS_SET_TYPE:
+      case JS_MAP_TYPE:
       case JS_WEAK_MAP_TYPE:
-      case JS_REGEXP_TYPE:
         if (map->is_undetectable()) return kUndetectable;
         return kOtherObject;
       case JS_ARRAY_TYPE:
         return kArray;
       case JS_FUNCTION_TYPE:
         return kFunction;
+      case JS_REGEXP_TYPE:
+        return kRegExp;
       case JS_PROXY_TYPE:
       case JS_FUNCTION_PROXY_TYPE:
         return kProxy;
@@ -196,6 +201,10 @@
         // We ought to find a cleaner solution for compiling stubs parameterised
         // over type or class variables, esp ones with bounds...
         return kDetectable;
+      case DECLARED_ACCESSOR_INFO_TYPE:
+      case EXECUTABLE_ACCESSOR_INFO_TYPE:
+      case ACCESSOR_PAIR_TYPE:
+        return kInternal;
       default:
         UNREACHABLE();
         return kNone;
@@ -218,7 +227,7 @@
 
 
 // Check this <= that.
-bool Type::Is(Type* that) {
+bool Type::IsSlowCase(Type* that) {
   // Fast path for bitsets.
   if (that->is_bitset()) {
     return (this->LubBitset() | that->as_bitset()) == that->as_bitset();
@@ -243,6 +252,7 @@
 
   // T <= (T1 \/ ... \/ Tn)  <=>  (T <= T1) \/ ... \/ (T <= Tn)
   // (iff T is not a union)
+  ASSERT(!this->is_union());
   if (that->is_union()) {
     Handle<Unioned> unioned = that->as_union();
     for (int i = 0; i < unioned->length(); ++i) {
@@ -267,13 +277,6 @@
     return (this->LubBitset() & that->as_bitset()) != 0;
   }
 
-  if (this->is_class()) {
-    return that->is_class() && *this->as_class() == *that->as_class();
-  }
-  if (this->is_constant()) {
-    return that->is_constant() && *this->as_constant() == *that->as_constant();
-  }
-
   // (T1 \/ ... \/ Tn) overlaps T <=> (T1 overlaps T) \/ ... \/ (Tn overlaps T)
   if (this->is_union()) {
     Handle<Unioned> unioned = this->as_union();
@@ -294,6 +297,14 @@
     return false;
   }
 
+  ASSERT(!that->is_union());
+  if (this->is_class()) {
+    return that->is_class() && *this->as_class() == *that->as_class();
+  }
+  if (this->is_constant()) {
+    return that->is_constant() && *this->as_constant() == *that->as_constant();
+  }
+
   return false;
 }
 
@@ -302,12 +313,12 @@
   ASSERT(!this->is_union());
   for (int i = 0; i < current_size; ++i) {
     Handle<Type> type = union_get(unioned, i);
-    if (type->is_bitset() ? this->Is(type) : this == *type) return true;
+    if (this->Is(type)) return true;
   }
   return false;
 }
 
-// Get non-bitsets from this which are not subsumed by that, store at unioned,
+// Get non-bitsets from this which are not subsumed by union, store at unioned,
 // starting at index. Returns updated index.
 int Type::ExtendUnion(Handle<Unioned> result, int current_size) {
   int old_size = current_size;
@@ -334,6 +345,12 @@
     return from_bitset(type1->as_bitset() | type2->as_bitset());
   }
 
+  // Fast case: top or bottom types.
+  if (type1->SameValue(Type::Any())) return *type1;
+  if (type2->SameValue(Type::Any())) return *type2;
+  if (type1->SameValue(Type::None())) return *type2;
+  if (type2->SameValue(Type::None())) return *type1;
+
   // Semi-fast case: Unioned objects are neither involved nor produced.
   if (!(type1->is_union() || type2->is_union())) {
     if (type1->Is(type2)) return *type2;
@@ -374,6 +391,85 @@
 }
 
 
+// Get non-bitsets from this which are also in that, store at unioned,
+// starting at index. Returns updated index.
+int Type::ExtendIntersection(
+    Handle<Unioned> result, Handle<Type> that, int current_size) {
+  int old_size = current_size;
+  if (this->is_class() || this->is_constant()) {
+    if (this->Is(that) && !this->InUnion(result, old_size))
+      result->set(current_size++, this);
+  } else if (this->is_union()) {
+    Handle<Unioned> unioned = this->as_union();
+    for (int i = 0; i < unioned->length(); ++i) {
+      Handle<Type> type = union_get(unioned, i);
+      ASSERT(i == 0 || !(type->is_bitset() || type->Is(union_get(unioned, 0))));
+      if (type->is_bitset()) continue;
+      if (type->Is(that) && !type->InUnion(result, old_size))
+        result->set(current_size++, *type);
+    }
+  }
+  return current_size;
+}
+
+
+// Intersection is O(1) on simple bit unions, but O(n*m) on structured unions.
+// TODO(rossberg): Should we use object sets somehow? Is it worth it?
+Type* Type::Intersect(Handle<Type> type1, Handle<Type> type2) {
+  // Fast case: bit sets.
+  if (type1->is_bitset() && type2->is_bitset()) {
+    return from_bitset(type1->as_bitset() & type2->as_bitset());
+  }
+
+  // Fast case: top or bottom types.
+  if (type1->SameValue(Type::None())) return *type1;
+  if (type2->SameValue(Type::None())) return *type2;
+  if (type1->SameValue(Type::Any())) return *type2;
+  if (type2->SameValue(Type::Any())) return *type1;
+
+  // Semi-fast case: Unioned objects are neither involved nor produced.
+  if (!(type1->is_union() || type2->is_union())) {
+    if (type1->Is(type2)) return *type1;
+    if (type2->Is(type1)) return *type2;
+  }
+
+  // Slow case: may need to produce a Unioned object.
+  Isolate* isolate = NULL;
+  int size = 0;
+  if (!type1->is_bitset()) {
+    isolate = HeapObject::cast(*type1)->GetIsolate();
+    size = (type1->is_union() ? type1->as_union()->length() : 2);
+  }
+  if (!type2->is_bitset()) {
+    isolate = HeapObject::cast(*type2)->GetIsolate();
+    int size2 = (type2->is_union() ? type2->as_union()->length() : 2);
+    size = (size == 0 ? size2 : Min(size, size2));
+  }
+  ASSERT(isolate != NULL);
+  ASSERT(size >= 2);
+  Handle<Unioned> unioned = isolate->factory()->NewFixedArray(size);
+  size = 0;
+
+  int bitset = type1->GlbBitset() & type2->GlbBitset();
+  if (bitset != kNone) unioned->set(size++, from_bitset(bitset));
+  size = type1->ExtendIntersection(unioned, type2, size);
+  size = type2->ExtendIntersection(unioned, type1, size);
+
+  if (size == 0) {
+    return None();
+  } else if (size == 1) {
+    return *union_get(unioned, 0);
+  } else if (size == unioned->length()) {
+    return from_handle(unioned);
+  }
+
+  // There were dropped cases. Copy to smaller union.
+  Handle<Unioned> result = isolate->factory()->NewFixedArray(size);
+  for (int i = 0; i < size; ++i) result->set(i, unioned->get(i));
+  return from_handle(result);
+}
+
+
 Type* Type::Optional(Handle<Type> type) {
   return type->is_bitset()
       ? from_bitset(type->as_bitset() | kUndefined)
diff --git a/src/types.h b/src/types.h
index 883d884..a2bcda6 100644
--- a/src/types.h
+++ b/src/types.h
@@ -48,8 +48,8 @@
 //   T <= Any
 //
 //   Oddball = Boolean \/ Null \/ Undefined
-//   Number = Integer32 \/ Double
-//   Integer31 < Integer32
+//   Number = Signed32 \/ Unsigned32 \/ Double
+//   Smi <= Signed32
 //   Name = String \/ Symbol
 //   UniqueName = InternalizedString \/ Symbol
 //   InternalizedString < String
@@ -60,6 +60,7 @@
 //   Receiver = Object \/ Proxy
 //   Array < Object
 //   Function < Object
+//   RegExp < Object
 //
 //   Class(map) < T   iff instance_type(map) < T
 //   Constant(x) < T  iff instance_type(map(x)) < T
@@ -83,9 +84,12 @@
 // lattice (e.g., splitting up number types further) without invalidating any
 // existing assumptions or tests.
 //
+// Consequently, do not use pointer equality for type tests, always use Is!
+//
 // Internally, all 'primitive' types, and their unions, are represented as
 // bitsets via smis. Class is a heap pointer to the respective map. Only
 // Constant's, or unions containing Class'es or Constant's, require allocation.
+// Note that the bitset representation is closed under both Union and Intersect.
 //
 // The type representation is heap-allocated, so cannot (currently) be used in
 // a parallel compilation context.
@@ -103,9 +107,11 @@
   static Type* Undefined() { return from_bitset(kUndefined); }
 
   static Type* Number() { return from_bitset(kNumber); }
-  static Type* Integer31() { return from_bitset(kInteger31); }
-  static Type* Integer32() { return from_bitset(kInteger32); }
+  static Type* Smi() { return from_bitset(kSmi); }
+  static Type* Signed32() { return from_bitset(kSigned32); }
+  static Type* Unsigned32() { return from_bitset(kUnsigned32); }
   static Type* Double() { return from_bitset(kDouble); }
+  static Type* NumberOrString() { return from_bitset(kNumberOrString); }
 
   static Type* Name() { return from_bitset(kName); }
   static Type* UniqueName() { return from_bitset(kUniqueName); }
@@ -118,7 +124,9 @@
   static Type* Undetectable() { return from_bitset(kUndetectable); }
   static Type* Array() { return from_bitset(kArray); }
   static Type* Function() { return from_bitset(kFunction); }
+  static Type* RegExp() { return from_bitset(kRegExp); }
   static Type* Proxy() { return from_bitset(kProxy); }
+  static Type* Internal() { return from_bitset(kInternal); }
 
   static Type* Class(Handle<Map> map) { return from_handle(map); }
   static Type* Constant(Handle<HeapObject> value) {
@@ -129,9 +137,10 @@
   }
 
   static Type* Union(Handle<Type> type1, Handle<Type> type2);
+  static Type* Intersect(Handle<Type> type1, Handle<Type> type2);
   static Type* Optional(Handle<Type> type);  // type \/ Undefined
 
-  bool Is(Type* that);
+  bool Is(Type* that) { return (this == that) ? true : IsSlowCase(that); }
   bool Is(Handle<Type> that) { return this->Is(*that); }
   bool Maybe(Type* that);
   bool Maybe(Handle<Type> that) { return this->Maybe(*that); }
@@ -186,28 +195,32 @@
     kNull = 1 << 0,
     kUndefined = 1 << 1,
     kBoolean = 1 << 2,
-    kInteger31 = 1 << 3,
-    kOtherInteger = 1 << 4,
-    kDouble = 1 << 5,
-    kSymbol = 1 << 6,
-    kInternalizedString = 1 << 7,
-    kOtherString = 1 << 8,
-    kUndetectable = 1 << 9,
-    kArray = 1 << 10,
-    kFunction = 1 << 11,
-    kOtherObject = 1 << 12,
-    kProxy = 1 << 13,
+    kSmi = 1 << 3,
+    kOtherSigned32 = 1 << 4,
+    kUnsigned32 = 1 << 5,
+    kDouble = 1 << 6,
+    kSymbol = 1 << 7,
+    kInternalizedString = 1 << 8,
+    kOtherString = 1 << 9,
+    kUndetectable = 1 << 10,
+    kArray = 1 << 11,
+    kFunction = 1 << 12,
+    kRegExp = 1 << 13,
+    kOtherObject = 1 << 14,
+    kProxy = 1 << 15,
+    kInternal = 1 << 16,
 
     kOddball = kBoolean | kNull | kUndefined,
-    kInteger32 = kInteger31 | kOtherInteger,
-    kNumber = kInteger32 | kDouble,
+    kSigned32 = kSmi | kOtherSigned32,
+    kNumber = kSigned32 | kUnsigned32 | kDouble,
     kString = kInternalizedString | kOtherString,
     kUniqueName = kSymbol | kInternalizedString,
     kName = kSymbol | kString,
-    kObject = kUndetectable | kArray | kFunction | kOtherObject,
+    kNumberOrString = kNumber | kString,
+    kObject = kUndetectable | kArray | kFunction | kRegExp | kOtherObject,
     kReceiver = kObject | kProxy,
     kAllocated = kDouble | kName | kReceiver,
-    kAny = kOddball | kNumber | kAllocated,
+    kAny = kOddball | kNumber | kAllocated | kInternal,
     kDetectable = kAllocated - kUndetectable,
     kNone = 0
   };
@@ -217,6 +230,8 @@
   bool is_constant() { return this->IsBox(); }
   bool is_union() { return this->IsFixedArray(); }
 
+  bool IsSlowCase(Type* that);
+
   int as_bitset() { return Smi::cast(this)->value(); }
   Handle<Map> as_class() { return Handle<Map>::cast(handle()); }
   Handle<v8::internal::Object> as_constant() {
@@ -248,6 +263,8 @@
   int GlbBitset();  // greatest lower bound that's a bitset
   bool InUnion(Handle<Unioned> unioned, int current_size);
   int ExtendUnion(Handle<Unioned> unioned, int current_size);
+  int ExtendIntersection(
+      Handle<Unioned> unioned, Handle<Type> type, int current_size);
 };
 
 } }  // namespace v8::internal
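
Because primitive types and their unions are encoded as bitsets, Union and
Intersect reduce to bitwise OR and AND in the common case. A toy JavaScript
model of the revised number lattice, with the bit values copied from the enum
above:

  var kSmi = 1 << 3, kOtherSigned32 = 1 << 4;
  var kUnsigned32 = 1 << 5, kDouble = 1 << 6;
  var kSigned32 = kSmi | kOtherSigned32;
  var kNumber = kSigned32 | kUnsigned32 | kDouble;

  function union(t1, t2) { return t1 | t2; }        // Type::Union, bitset fast case
  function intersect(t1, t2) { return t1 & t2; }    // Type::Intersect, bitset fast case
  function is(t1, t2) { return (t1 | t2) === t2; }  // subtyping test: t1 <= t2

  is(kSmi, kSigned32);            // true: Smi <= Signed32
  is(kSigned32, kNumber);         // true: Signed32 <= Number
  intersect(kSigned32, kDouble);  // 0 (kNone): the ranges share no bits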
diff --git a/src/typing.cc b/src/typing.cc
index e1fd8aa..3f3ff60 100644
--- a/src/typing.cc
+++ b/src/typing.cc
@@ -295,7 +295,7 @@
     if ((prop->kind() == ObjectLiteral::Property::MATERIALIZED_LITERAL &&
         !CompileTimeValue::IsCompileTimeValue(prop->value())) ||
         prop->kind() == ObjectLiteral::Property::COMPUTED) {
-      if (prop->key()->handle()->IsInternalizedString() && prop->emit_store())
+      if (prop->key()->value()->IsInternalizedString() && prop->emit_store())
         prop->RecordTypeFeedback(oracle());
     }
   }
@@ -404,7 +404,9 @@
   ASSERT(!HasStackOverflow());
   CHECK_ALIVE(Visit(expr->expression()));
 
-  expr->RecordTypeFeedback(oracle());
+  // Collect type feedback.
+  Handle<Type> op_type = oracle()->UnaryType(expr->UnaryOperationFeedbackId());
+  MergeLowerType(expr->expression(), op_type);
   if (expr->op() == Token::NOT) {
     // TODO(rossberg): only do in test or value context.
     expr->expression()->RecordToBooleanTypeFeedback(oracle());
@@ -429,7 +431,15 @@
   CHECK_ALIVE(Visit(expr->left()));
   CHECK_ALIVE(Visit(expr->right()));
 
-  expr->RecordTypeFeedback(oracle());
+  // Collect type feedback.
+  Handle<Type> type, left_type, right_type;
+  Maybe<int> fixed_right_arg;
+  oracle()->BinaryType(expr->BinaryOperationFeedbackId(),
+      &left_type, &right_type, &type, &fixed_right_arg);
+  MergeLowerType(expr, type);
+  MergeLowerType(expr->left(), left_type);
+  MergeLowerType(expr->right(), right_type);
+  expr->set_fixed_right_arg(fixed_right_arg);
   if (expr->op() == Token::OR || expr->op() == Token::AND) {
     expr->left()->RecordToBooleanTypeFeedback(oracle());
   }
@@ -441,7 +451,13 @@
   CHECK_ALIVE(Visit(expr->left()));
   CHECK_ALIVE(Visit(expr->right()));
 
-  expr->RecordTypeFeedback(oracle());
+  // Collect type feedback.
+  Handle<Type> left_type, right_type, combined_type;
+  oracle()->CompareType(expr->CompareOperationFeedbackId(),
+      &left_type, &right_type, &combined_type);
+  MergeLowerType(expr->left(), left_type);
+  MergeLowerType(expr->right(), right_type);
+  expr->set_combined_type(combined_type);
 }
 
 
diff --git a/src/typing.h b/src/typing.h
index 1f172ea..2d3fac0 100644
--- a/src/typing.h
+++ b/src/typing.h
@@ -62,6 +62,13 @@
   TypeFeedbackOracle* oracle() { return &oracle_; }
   Zone* zone() const { return info_->zone(); }
 
+  void MergeLowerType(Expression* e, Handle<Type> t) {
+    e->set_lower_type(handle(Type::Union(e->lower_type(), t), isolate_));
+  }
+  void MergeUpperType(Expression* e, Handle<Type> t) {
+    e->set_upper_type(handle(Type::Intersect(e->upper_type(), t), isolate_));
+  }
+
   void VisitDeclarations(ZoneList<Declaration*>* declarations);
   void VisitStatements(ZoneList<Statement*>* statements);
 
diff --git a/src/unbound-queue-inl.h b/src/unbound-queue-inl.h
index fffb1db..796ba40 100644
--- a/src/unbound-queue-inl.h
+++ b/src/unbound-queue-inl.h
@@ -30,6 +30,8 @@
 
 #include "unbound-queue.h"
 
+#include "atomicops.h"
+
 namespace v8 {
 namespace internal {
 
@@ -66,11 +68,12 @@
 
 
 template<typename Record>
-void UnboundQueue<Record>::Dequeue(Record* rec) {
-  ASSERT(divider_ != last_);
+bool UnboundQueue<Record>::Dequeue(Record* rec) {
+  if (divider_ == Acquire_Load(&last_)) return false;
   Node* next = reinterpret_cast<Node*>(divider_)->next;
   *rec = next->value;
-  OS::ReleaseStore(&divider_, reinterpret_cast<AtomicWord>(next));
+  Release_Store(&divider_, reinterpret_cast<AtomicWord>(next));
+  return true;
 }
 
 
@@ -78,14 +81,23 @@
 void UnboundQueue<Record>::Enqueue(const Record& rec) {
   Node*& next = reinterpret_cast<Node*>(last_)->next;
   next = new Node(rec);
-  OS::ReleaseStore(&last_, reinterpret_cast<AtomicWord>(next));
-  while (first_ != reinterpret_cast<Node*>(divider_)) DeleteFirst();
+  Release_Store(&last_, reinterpret_cast<AtomicWord>(next));
+
+  while (first_ != reinterpret_cast<Node*>(Acquire_Load(&divider_))) {
+    DeleteFirst();
+  }
 }
 
 
 template<typename Record>
-Record* UnboundQueue<Record>::Peek() {
-  ASSERT(divider_ != last_);
+bool UnboundQueue<Record>::IsEmpty() const {
+  return NoBarrier_Load(&divider_) == NoBarrier_Load(&last_);
+}
+
+
+template<typename Record>
+Record* UnboundQueue<Record>::Peek() const {
+  if (divider_ == Acquire_Load(&last_)) return NULL;
   Node* next = reinterpret_cast<Node*>(divider_)->next;
   return &next->value;
 }
diff --git a/src/unbound-queue.h b/src/unbound-queue.h
index 59a426b..429e3c6 100644
--- a/src/unbound-queue.h
+++ b/src/unbound-queue.h
@@ -46,10 +46,10 @@
   inline UnboundQueue();
   inline ~UnboundQueue();
 
-  INLINE(void Dequeue(Record* rec));
+  INLINE(bool Dequeue(Record* rec));
   INLINE(void Enqueue(const Record& rec));
-  INLINE(bool IsEmpty()) { return divider_ == last_; }
-  INLINE(Record* Peek());
+  INLINE(bool IsEmpty() const);
+  INLINE(Record* Peek() const);
 
  private:
   INLINE(void DeleteFirst());
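
Dequeue and Peek no longer assert on an empty queue; they report emptiness to
the caller instead. A single-threaded JavaScript model of the new contract
(the real implementation synchronizes divider_ and last_ with acquire/release
atomics):

  function UnboundQueueModel() { this.items = []; }

  UnboundQueueModel.prototype.enqueue = function (rec) {
    this.items.push(rec);
  };
  UnboundQueueModel.prototype.dequeue = function (out) {
    if (this.items.length === 0) return false;  // was ASSERT(divider_ != last_)
    out.value = this.items.shift();
    return true;
  };
  UnboundQueueModel.prototype.peek = function () {
    return this.items.length === 0 ? null : this.items[0];
  };

  var q = new UnboundQueueModel();
  var slot = {};
  q.dequeue(slot);  // false: an empty queue is now a normal case
  q.enqueue(42);
  q.dequeue(slot);  // true, slot.value === 42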
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 373618e..dfe1e20 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -94,8 +94,6 @@
 #define STATS_COUNTER_LIST_1(SC)                                      \
   /* Global Handle Count*/                                            \
   SC(global_handles, V8.GlobalHandles)                                \
-  /* Mallocs from PCRE */                                             \
-  SC(pcre_mallocs, V8.PcreMallocCount)                                \
   /* OS Memory allocated */                                           \
   SC(memory_allocated, V8.OsMemoryAllocated)                          \
   SC(normalized_maps, V8.NormalizedMaps)                              \
@@ -114,8 +112,6 @@
   SC(arguments_adaptors, V8.ArgumentsAdaptors)                        \
   SC(compilation_cache_hits, V8.CompilationCacheHits)                 \
   SC(compilation_cache_misses, V8.CompilationCacheMisses)             \
-  SC(regexp_cache_hits, V8.RegExpCacheHits)                           \
-  SC(regexp_cache_misses, V8.RegExpCacheMisses)                       \
   SC(string_ctor_calls, V8.StringConstructorCalls)                    \
   SC(string_ctor_conversions, V8.StringConstructorConversions)        \
   SC(string_ctor_cached_number, V8.StringConstructorCachedNumber)     \
@@ -133,8 +129,6 @@
   SC(total_preparse_symbols_skipped, V8.TotalPreparseSymbolSkipped)   \
   /* Amount of compiled source code. */                               \
   SC(total_compile_size, V8.TotalCompileSize)                         \
-  /* Amount of source code compiled with the old codegen. */          \
-  SC(total_old_codegen_source_size, V8.TotalOldCodegenSourceSize)     \
   /* Amount of source code compiled with the full codegen. */         \
   SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize)   \
   /* Number of contexts created from scratch. */                      \
@@ -161,8 +155,6 @@
      V8.GCCompactorCausedByPromotedData)                              \
   SC(gc_compactor_caused_by_oldspace_exhaustion,                      \
      V8.GCCompactorCausedByOldspaceExhaustion)                        \
-  SC(gc_compactor_caused_by_weak_handles,                             \
-     V8.GCCompactorCausedByWeakHandles)                               \
   SC(gc_last_resort_from_js, V8.GCLastResortFromJS)                   \
   SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles)         \
   /* How is the generic keyed-load stub used? */                      \
@@ -177,39 +169,9 @@
   SC(keyed_call_generic_smi_dict, V8.KeyedCallGenericSmiDict)         \
   SC(keyed_call_generic_lookup_cache, V8.KeyedCallGenericLookupCache) \
   SC(keyed_call_generic_lookup_dict, V8.KeyedCallGenericLookupDict)   \
-  SC(keyed_call_generic_value_type, V8.KeyedCallGenericValueType)     \
   SC(keyed_call_generic_slow, V8.KeyedCallGenericSlow)                \
   SC(keyed_call_generic_slow_load, V8.KeyedCallGenericSlowLoad)       \
-  /* Count how much the monomorphic keyed-load stubs are hit. */      \
-  SC(keyed_load_function_prototype, V8.KeyedLoadFunctionPrototype)    \
-  SC(keyed_load_string_length, V8.KeyedLoadStringLength)              \
-  SC(keyed_load_array_length, V8.KeyedLoadArrayLength)                \
-  SC(keyed_load_constant_function, V8.KeyedLoadConstantFunction)      \
-  SC(keyed_load_field, V8.KeyedLoadField)                             \
-  SC(keyed_load_callback, V8.KeyedLoadCallback)                       \
-  SC(keyed_load_interceptor, V8.KeyedLoadInterceptor)                 \
-  SC(keyed_load_inline, V8.KeyedLoadInline)                           \
-  SC(keyed_load_inline_miss, V8.KeyedLoadInlineMiss)                  \
-  SC(named_load_inline, V8.NamedLoadInline)                           \
-  SC(named_load_inline_miss, V8.NamedLoadInlineMiss)                  \
-  SC(named_load_global_inline, V8.NamedLoadGlobalInline)              \
-  SC(named_load_global_inline_miss, V8.NamedLoadGlobalInlineMiss)     \
-  SC(dont_delete_hint_hit, V8.DontDeleteHintHit)                      \
-  SC(dont_delete_hint_miss, V8.DontDeleteHintMiss)                    \
   SC(named_load_global_stub, V8.NamedLoadGlobalStub)                  \
-  SC(named_load_global_stub_miss, V8.NamedLoadGlobalStubMiss)         \
-  SC(keyed_store_field, V8.KeyedStoreField)                           \
-  SC(named_store_inline_field, V8.NamedStoreInlineField)              \
-  SC(keyed_store_inline, V8.KeyedStoreInline)                         \
-  SC(named_load_inline_generic, V8.NamedLoadInlineGeneric)            \
-  SC(named_load_inline_field, V8.NamedLoadInlineFast)                 \
-  SC(keyed_load_inline_generic, V8.KeyedLoadInlineGeneric)            \
-  SC(keyed_load_inline_fast, V8.KeyedLoadInlineFast)                  \
-  SC(keyed_store_inline_generic, V8.KeyedStoreInlineGeneric)          \
-  SC(keyed_store_inline_fast, V8.KeyedStoreInlineFast)                \
-  SC(named_store_inline_generic, V8.NamedStoreInlineGeneric)          \
-  SC(named_store_inline_fast, V8.NamedStoreInlineFast)                \
-  SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss)                \
   SC(named_store_global_inline, V8.NamedStoreGlobalInline)            \
   SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss)   \
   SC(keyed_store_polymorphic_stubs, V8.KeyedStorePolymorphicStubs)    \
@@ -231,7 +193,6 @@
   SC(call_global_inline_miss, V8.CallGlobalInlineMiss)                \
   SC(constructed_objects, V8.ConstructedObjects)                      \
   SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime)       \
-  SC(constructed_objects_stub, V8.ConstructedObjectsStub)             \
   SC(negative_lookups, V8.NegativeLookups)                            \
   SC(negative_lookups_miss, V8.NegativeLookupsMiss)                   \
   SC(megamorphic_stub_cache_probes, V8.MegamorphicStubCacheProbes)    \
@@ -243,9 +204,7 @@
   SC(enum_cache_hits, V8.EnumCacheHits)                               \
   SC(enum_cache_misses, V8.EnumCacheMisses)                           \
   SC(zone_segment_bytes, V8.ZoneSegmentBytes)                         \
-  SC(compute_entry_frame, V8.ComputeEntryFrame)                       \
   SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls)            \
-  SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs)   \
   SC(fast_new_closure_total, V8.FastNewClosureTotal)                  \
   SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized)   \
   SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \
@@ -279,10 +238,9 @@
   SC(transcendental_cache_miss, V8.TranscendentalCacheMiss)           \
   SC(stack_interrupts, V8.StackInterrupts)                            \
   SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks)                 \
-  SC(smi_checks_removed, V8.SmiChecksRemoved)                         \
-  SC(map_checks_removed, V8.MapChecksRemoved)                         \
-  SC(quote_json_char_count, V8.QuoteJsonCharacterCount)               \
-  SC(quote_json_char_recount, V8.QuoteJsonCharacterReCount)           \
+  SC(soft_deopts_requested, V8.SoftDeoptsRequested)                   \
+  SC(soft_deopts_inserted, V8.SoftDeoptsInserted)                     \
+  SC(soft_deopts_executed, V8.SoftDeoptsExecuted)                     \
   SC(new_space_bytes_available, V8.MemoryNewSpaceBytesAvailable)      \
   SC(new_space_bytes_committed, V8.MemoryNewSpaceBytesCommitted)      \
   SC(new_space_bytes_used, V8.MemoryNewSpaceBytesUsed)                \
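
The SC(...) lists above use the X-macro pattern: one master list, expanded under different definitions of SC to declare the counters, generate accessors, and emit the V8.* histogram captions. A minimal self-contained illustration (the counters below are made up):

#include <cstdio>

#define DEMO_COUNTER_LIST(SC)      \
  SC(foo_calls, "Demo.FooCalls")   \
  SC(bar_calls, "Demo.BarCalls")

// Expansion 1: one int per counter.
#define DECLARE_COUNTER(name, caption) static int name = 0;
DEMO_COUNTER_LIST(DECLARE_COUNTER)
#undef DECLARE_COUNTER

int main() {
  foo_calls = 3;
  bar_calls = 7;
  // Expansion 2: dump every counter with its caption.
#define PRINT_COUNTER(name, caption) std::printf("%s = %d\n", caption, name);
  DEMO_COUNTER_LIST(PRINT_COUNTER)
#undef PRINT_COUNTER
  return 0;
}
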
diff --git a/src/v8.cc b/src/v8.cc
index 80b12de..cb67105 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -271,6 +271,44 @@
     FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
   }
   if (FLAG_trace_hydrogen) FLAG_parallel_recompilation = false;
+
+  if (FLAG_sweeper_threads <= 0) {
+    if (FLAG_concurrent_sweeping) {
+      FLAG_sweeper_threads = SystemThreadManager::
+          NumberOfParallelSystemThreads(
+              SystemThreadManager::CONCURRENT_SWEEPING);
+    } else if (FLAG_parallel_sweeping) {
+      FLAG_sweeper_threads = SystemThreadManager::
+          NumberOfParallelSystemThreads(
+              SystemThreadManager::PARALLEL_SWEEPING);
+    }
+    if (FLAG_sweeper_threads == 0) {
+      FLAG_concurrent_sweeping = false;
+      FLAG_parallel_sweeping = false;
+    }
+  } else if (!FLAG_concurrent_sweeping && !FLAG_parallel_sweeping) {
+    FLAG_sweeper_threads = 0;
+  }
+
+  if (FLAG_parallel_marking) {
+    if (FLAG_marking_threads <= 0) {
+      FLAG_marking_threads = SystemThreadManager::
+          NumberOfParallelSystemThreads(
+              SystemThreadManager::PARALLEL_MARKING);
+    }
+    if (FLAG_marking_threads == 0) {
+      FLAG_parallel_marking = false;
+    }
+  } else {
+    FLAG_marking_threads = 0;
+  }
+
+  if (FLAG_parallel_recompilation &&
+      SystemThreadManager::NumberOfParallelSystemThreads(
+          SystemThreadManager::PARALLEL_RECOMPILATION) == 0) {
+    FLAG_parallel_recompilation = false;
+  }
+
   OS::SetUp();
   Sampler::SetUp();
   CPU::SetUp();
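
The new block resolves every thread-count flag with one consistent rule: a value of zero or less means "derive from the machine", and a derived count of zero disables the feature outright. The rule in isolation (a hypothetical helper, not V8 code):

// flag_value <= 0: ask the platform; a resulting count of 0 turns the
// corresponding feature off entirely.
static int ResolveThreadCount(int flag_value,
                              int machine_parallelism,
                              bool* feature_enabled) {
  int threads = flag_value > 0 ? flag_value : machine_parallelism;
  if (threads == 0) *feature_enabled = false;
  return threads;
}
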
diff --git a/src/v8.h b/src/v8.h
index b16374f..52fb98a 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -101,6 +101,8 @@
   // Support for return-address rewriting profilers.
   static void SetReturnAddressLocationResolver(
       ReturnAddressLocationResolver resolver);
+  // Support for entry hooking JITed code.
+  static void SetFunctionEntryHook(FunctionEntryHook entry_hook);
   // Random number generation support. Not cryptographically safe.
   static uint32_t Random(Context* context);
   // We use random numbers internally in memory allocation and in the
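
For embedders, the hook is a plain C callback that receives the code entry point and the location of the return address; per the comment added in code-stubs-x64.cc it must not call back into V8, and it has to be installed before any code is generated. A hedged usage sketch (the exact public SetFunctionEntryHook spelling in include/v8.h may differ from this internal declaration):

#include <cstdint>
#include <cstdio>

// Matches the v8::FunctionEntryHook typedef: called on entry to JITed code.
static void MyEntryHook(uintptr_t function, uintptr_t return_addr_location) {
  // No V8 calls are allowed in here; just record the entry.
  std::fprintf(stderr, "enter %p\n", reinterpret_cast<void*>(function));
  (void)return_addr_location;
}

// Install before V8 generates any code, e.g. before initialization:
//   v8::V8::SetFunctionEntryHook(MyEntryHook);
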
diff --git a/src/version.cc b/src/version.cc
index 9ac143b..03d77bf 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,9 +33,9 @@
 // NOTE these macros are used by some of the tool scripts and the build
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
-#define MINOR_VERSION     19
-#define BUILD_NUMBER      18
-#define PATCH_LEVEL       6
+#define MINOR_VERSION     20
+#define BUILD_NUMBER      0
+#define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index d1d6278..1c231a7 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -308,6 +308,7 @@
 
 void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  ASSERT(!target->IsConsString());
   Memory::Object_at(pc_) = target;
   CPU::FlushICache(pc_, sizeof(Address));
   if (mode == UPDATE_WRITE_BARRIER &&
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 2a01b0b..9376cc7 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -456,6 +456,8 @@
 
 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
                                              bool is_construct) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
   // Expects five C++ function parameters.
   // - Address entry (ignored)
   // - JSFunction* function (
@@ -1093,366 +1095,6 @@
 }
 
 
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. If the parameter initial_capacity is larger than zero, an elements
-// backing store is allocated with this size and filled with the hole values.
-// Otherwise the elements backing store is set to the empty FixedArray.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
-                                 Register array_function,
-                                 Register result,
-                                 Register scratch1,
-                                 Register scratch2,
-                                 Register scratch3,
-                                 Label* gc_required) {
-  const int initial_capacity = JSArray::kPreallocatedArrayElements;
-  STATIC_ASSERT(initial_capacity >= 0);
-
-  __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
-
-  // Allocate the JSArray object together with space for a fixed array with the
-  // requested elements.
-  int size = JSArray::kSize;
-  if (initial_capacity > 0) {
-    size += FixedArray::SizeFor(initial_capacity);
-  }
-  __ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT);
-
-  // Allocated the JSArray. Now initialize the fields except for the elements
-  // array.
-  // result: JSObject
-  // scratch1: initial map
-  // scratch2: start of next object
-  Factory* factory = masm->isolate()->factory();
-  __ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
-  __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
-          factory->empty_fixed_array());
-  // Field JSArray::kElementsOffset is initialized later.
-  __ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
-
-  // If no storage is requested for the elements array just set the empty
-  // fixed array.
-  if (initial_capacity == 0) {
-    __ Move(FieldOperand(result, JSArray::kElementsOffset),
-            factory->empty_fixed_array());
-    return;
-  }
-
-  // Calculate the location of the elements array and set elements array member
-  // of the JSArray.
-  // result: JSObject
-  // scratch2: start of next object
-  __ lea(scratch1, Operand(result, JSArray::kSize));
-  __ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
-
-  // Initialize the FixedArray and fill it with holes. FixedArray length is
-  // stored as a smi.
-  // result: JSObject
-  // scratch1: elements array
-  // scratch2: start of next object
-  __ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
-          factory->fixed_array_map());
-  __ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
-          Smi::FromInt(initial_capacity));
-
-  // Fill the FixedArray with the hole value. Inline the code if short.
-  // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
-  static const int kLoopUnfoldLimit = 4;
-  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
-  if (initial_capacity <= kLoopUnfoldLimit) {
-    // Use a scratch register here to have only one reloc info when unfolding
-    // the loop.
-    for (int i = 0; i < initial_capacity; i++) {
-      __ movq(FieldOperand(scratch1,
-                           FixedArray::kHeaderSize + i * kPointerSize),
-              scratch3);
-    }
-  } else {
-    Label loop, entry;
-    __ movq(scratch2, Immediate(initial_capacity));
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ movq(FieldOperand(scratch1,
-                         scratch2,
-                         times_pointer_size,
-                         FixedArray::kHeaderSize),
-            scratch3);
-    __ bind(&entry);
-    __ decq(scratch2);
-    __ j(not_sign, &loop);
-  }
-}
-
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register, and the beginning and end of the FixedArray
-// elements storage are put into the registers elements_array and
-// elements_array_end (see below for when that is not the case). If the
-// parameter fill_with_hole is true, the allocated elements backing store is
-// filled with the hole value; otherwise it is left uninitialized. When the
-// backing store is filled, the register elements_array is clobbered.
-static void AllocateJSArray(MacroAssembler* masm,
-                            Register array_function,  // Array function.
-                            Register array_size,  // As a smi, cannot be 0.
-                            Register result,
-                            Register elements_array,
-                            Register elements_array_end,
-                            Register scratch,
-                            bool fill_with_hole,
-                            Label* gc_required) {
-  __ LoadInitialArrayMap(array_function, scratch,
-                         elements_array, fill_with_hole);
-
-  if (FLAG_debug_code) {  // Assert that array size is not zero.
-    __ testq(array_size, array_size);
-    __ Assert(not_zero, "array size is unexpectedly 0");
-  }
-
-  // Allocate the JSArray object together with space for a FixedArray with the
-  // requested elements.
-  SmiIndex index =
-      masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
-  __ Allocate(JSArray::kSize + FixedArray::kHeaderSize,
-              index.scale,
-              index.reg,
-              result,
-              elements_array_end,
-              scratch,
-              gc_required,
-              TAG_OBJECT);
-
-  // Allocated the JSArray. Now initialize the fields except for the elements
-  // array.
-  // result: JSObject
-  // elements_array: initial map
-  // elements_array_end: start of next object
-  // array_size: size of array (smi)
-  Factory* factory = masm->isolate()->factory();
-  __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
-  __ Move(elements_array, factory->empty_fixed_array());
-  __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
-  // Field JSArray::kElementsOffset is initialized later.
-  __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
-
-  // Calculate the location of the elements array and set elements array member
-  // of the JSArray.
-  // result: JSObject
-  // elements_array_end: start of next object
-  // array_size: size of array (smi)
-  __ lea(elements_array, Operand(result, JSArray::kSize));
-  __ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
-
-  // Initialize the fixed array. FixedArray length is stored as a smi.
-  // result: JSObject
-  // elements_array: elements array
-  // elements_array_end: start of next object
-  // array_size: size of array (smi)
-  __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
-          factory->fixed_array_map());
-  // For non-empty JSArrays, the lengths of the FixedArray and the JSArray
-  // are the same.
-  __ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
-
-  // Fill the allocated FixedArray with the hole value if requested.
-  // result: JSObject
-  // elements_array: elements array
-  // elements_array_end: start of next object
-  if (fill_with_hole) {
-    Label loop, entry;
-    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
-    __ lea(elements_array, Operand(elements_array,
-                                   FixedArray::kHeaderSize - kHeapObjectTag));
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ movq(Operand(elements_array, 0), scratch);
-    __ addq(elements_array, Immediate(kPointerSize));
-    __ bind(&entry);
-    __ cmpq(elements_array, elements_array_end);
-    __ j(below, &loop);
-  }
-}
-
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code, the runtime is called. This
-// function assumes the following state:
-//   rdi: constructor (built-in Array function)
-//   rax: argc
-//   rsp[0]: return address
-//   rsp[8]: last argument
-// This function is used for both construct and normal calls of Array. The only
-// difference between handling a construct call and a normal call is that for a
-// construct call the constructor function in rdi needs to be preserved for
-// entering the generic code. In both cases argc in rax needs to be preserved.
-// Both registers are preserved by this code, so there is no need to
-// differentiate between a construct call and a normal call.
-void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
-  Label argc_one_or_more, argc_two_or_more, empty_array, not_empty_array,
-      has_non_smi_element, finish, cant_transition_map, not_double;
-
-  // Check for array construction with zero arguments.
-  __ testq(rax, rax);
-  __ j(not_zero, &argc_one_or_more);
-
-  __ bind(&empty_array);
-  // Handle construction of an empty array.
-  AllocateEmptyJSArray(masm,
-                       rdi,
-                       rbx,
-                       rcx,
-                       rdx,
-                       r8,
-                       call_generic_code);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->array_function_native(), 1);
-  __ movq(rax, rbx);
-  __ ret(kPointerSize);
-
-  // Check for one argument. Bail out if the argument is not a smi or if it is
-  // negative.
-  __ bind(&argc_one_or_more);
-  __ cmpq(rax, Immediate(1));
-  __ j(not_equal, &argc_two_or_more);
-  __ movq(rdx, Operand(rsp, kPointerSize));  // Get the argument from the stack.
-
-  __ SmiTest(rdx);
-  __ j(not_zero, &not_empty_array);
-  __ pop(r8);  // Adjust stack.
-  __ Drop(1);
-  __ push(r8);
-  __ movq(rax, Immediate(0));  // Treat this as a call with argc of zero.
-  __ jmp(&empty_array);
-
-  __ bind(&not_empty_array);
-  __ JumpUnlessNonNegativeSmi(rdx, call_generic_code);
-
-  // Handle construction of an empty array of a certain size. Bail out if size
-  // is too large to actually allocate an elements array.
-  __ SmiCompare(rdx, Smi::FromInt(JSObject::kInitialMaxFastElementArray));
-  __ j(greater_equal, call_generic_code);
-
-  // rax: argc
-  // rdx: array_size (smi)
-  // rdi: constructor
-  // rsp[0]: return address
-  // rsp[8]: argument
-  AllocateJSArray(masm,
-                  rdi,
-                  rdx,
-                  rbx,
-                  rcx,
-                  r8,
-                  r9,
-                  true,
-                  call_generic_code);
-  __ IncrementCounter(counters->array_function_native(), 1);
-  __ movq(rax, rbx);
-  __ ret(2 * kPointerSize);
-
-  // Handle construction of an array from a list of arguments.
-  __ bind(&argc_two_or_more);
-  __ movq(rdx, rax);
-  __ Integer32ToSmi(rdx, rdx);  // Convert argc to a smi.
-  // rax: argc
-  // rdx: array_size (smi)
-  // rdi: constructor
-  // rsp[0] : return address
-  // rsp[8] : last argument
-  AllocateJSArray(masm,
-                  rdi,
-                  rdx,
-                  rbx,
-                  rcx,
-                  r8,
-                  r9,
-                  false,
-                  call_generic_code);
-  __ IncrementCounter(counters->array_function_native(), 1);
-
-  // rax: argc
-  // rbx: JSArray
-  // rcx: elements_array
-  // r8: elements_array_end (untagged)
-  // rsp[0]: return address
-  // rsp[8]: last argument
-
-  // Location of the last argument
-  __ lea(r9, Operand(rsp, kPointerSize));
-
-  // Location of the first array element (the parameter fill_with_hole to
-  // AllocateJSArray is false, so the FixedArray is returned in rcx).
-  __ lea(rdx, Operand(rcx, FixedArray::kHeaderSize - kHeapObjectTag));
-
-  // rax: argc
-  // rbx: JSArray
-  // rdx: location of the first array element
-  // r9: location of the last argument
-  // rsp[0]: return address
-  // rsp[8]: last argument
-  Label loop, entry;
-  __ movq(rcx, rax);
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
-  if (FLAG_smi_only_arrays) {
-    __ JumpIfNotSmi(r8, &has_non_smi_element);
-  }
-  __ movq(Operand(rdx, 0), r8);
-  __ addq(rdx, Immediate(kPointerSize));
-  __ bind(&entry);
-  __ decq(rcx);
-  __ j(greater_equal, &loop);
-
-  // Remove caller arguments from the stack and return.
-  // rax: argc
-  // rbx: JSArray
-  // rsp[0]: return address
-  // rsp[8]: last argument
-  __ bind(&finish);
-  __ pop(rcx);
-  __ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
-  __ push(rcx);
-  __ movq(rax, rbx);
-  __ ret(0);
-
-  __ bind(&has_non_smi_element);
-  // Double values are handled by the runtime.
-  __ CheckMap(r8,
-              masm->isolate()->factory()->heap_number_map(),
-              &not_double,
-              DONT_DO_SMI_CHECK);
-  __ bind(&cant_transition_map);
-  __ UndoAllocationInNewSpace(rbx);
-  __ jmp(call_generic_code);
-
-  __ bind(&not_double);
-  // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
-  // rbx: JSArray
-  __ movq(r11, FieldOperand(rbx, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         r11,
-                                         kScratchRegister,
-                                         &cant_transition_map);
-
-  __ movq(FieldOperand(rbx, HeapObject::kMapOffset), r11);
-  __ RecordWriteField(rbx, HeapObject::kMapOffset, r11, r8,
-                      kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
-  // Finish the array initialization loop.
-  Label loop2;
-  __ bind(&loop2);
-  __ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
-  __ movq(Operand(rdx, 0), r8);
-  __ addq(rdx, Immediate(kPointerSize));
-  __ decq(rcx);
-  __ j(greater_equal, &loop2);
-  __ jmp(&finish);
-}
-
-
 void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax : argc
@@ -1477,20 +1119,9 @@
 
   // Run the native code for the InternalArray function called as a normal
   // function.
-  if (FLAG_optimize_constructed_arrays) {
-    // tail call a stub
-    InternalArrayConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else {
-    ArrayNativeCode(masm, &generic_array_code);
-
-    // Jump to the generic array code in case the specialized code cannot handle
-    // the construction.
-    __ bind(&generic_array_code);
-    Handle<Code> array_code =
-        masm->isolate()->builtins()->InternalArrayCodeGeneric();
-    __ Jump(array_code, RelocInfo::CODE_TARGET);
-  }
+  // Tail call a stub.
+  InternalArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
 }
 
 
@@ -1517,61 +1148,16 @@
   }
 
   // Run the native code for the Array function called as a normal function.
-  if (FLAG_optimize_constructed_arrays) {
-    // tail call a stub
-    Handle<Object> undefined_sentinel(
-        masm->isolate()->heap()->undefined_value(),
-        masm->isolate());
-    __ Move(rbx, undefined_sentinel);
-    ArrayConstructorStub stub(masm->isolate());
-    __ TailCallStub(&stub);
-  } else {
-    ArrayNativeCode(masm, &generic_array_code);
-
-    // Jump to the generic array code in case the specialized code cannot handle
-    // the construction.
-    __ bind(&generic_array_code);
-    Handle<Code> array_code =
-        masm->isolate()->builtins()->ArrayCodeGeneric();
-    __ Jump(array_code, RelocInfo::CODE_TARGET);
-  }
+  // Tail call a stub.
+  Handle<Object> undefined_sentinel(
+      masm->isolate()->heap()->undefined_value(),
+      masm->isolate());
+  __ Move(rbx, undefined_sentinel);
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
 }
 
 
-void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax : argc
-  //  -- rdi : constructor
-  //  -- rsp[0] : return address
-  //  -- rsp[8] : last argument
-  // -----------------------------------
-  if (FLAG_debug_code) {
-    // The array construct code is only set for the builtin and internal
-    // Array functions which always have a map.
-
-    // Initial map for the builtin Array function should be a map.
-    __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
-    // Will both indicate a NULL and a Smi.
-    STATIC_ASSERT(kSmiTag == 0);
-    Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
-    __ Check(not_smi, "Unexpected initial map for Array function");
-    __ CmpObjectType(rcx, MAP_TYPE, rcx);
-    __ Check(equal, "Unexpected initial map for Array function");
-  }
-
-  Label generic_constructor;
-  // Run the native code for the Array function called as constructor.
-  ArrayNativeCode(masm, &generic_constructor);
-  // Jump to the generic construct code in case the specialized code cannot
-  // handle the construction.
-  __ bind(&generic_constructor);
-  Handle<Code> generic_construct_stub =
-      masm->isolate()->builtins()->JSConstructStubGeneric();
-  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
-}
-
-
-
 void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax                 : number of arguments
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 8868c7a..1be60ab 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1232,7 +1232,7 @@
 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
   Label right_arg_changed, call_runtime;
 
-  if (op_ == Token::MOD && has_fixed_right_arg_) {
+  if (op_ == Token::MOD && encoded_right_arg_.has_value) {
     // It is guaranteed that the value will fit into a Smi, because if it
     // didn't, we wouldn't be here, see BinaryOp_Patch.
     __ Cmp(rax, Smi::FromInt(fixed_right_arg_value()));
@@ -3436,11 +3436,10 @@
   __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
   __ movzxbq(scratch,
              FieldOperand(scratch, Map::kInstanceTypeOffset));
-  // Ensure that no non-strings have the internalized bit set.
-  STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsInternalizedMask);
   STATIC_ASSERT(kInternalizedTag != 0);
-  __ testb(scratch, Immediate(kIsInternalizedMask));
-  __ j(zero, label);
+  __ and_(scratch, Immediate(kIsNotStringMask | kIsInternalizedMask));
+  __ cmpb(scratch, Immediate(kInternalizedTag | kStringTag));
+  __ j(not_equal, label);
 }
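
The replacement check folds two questions, "is it a string?" and "is it internalized?", into a single and/cmp/jcc sequence on the instance type byte. The same trick in C++, with stand-in bit assignments (V8's actual kIsNotStringMask and friends live in objects.h):

#include <cstdint>

// Stand-in encodings: strings have the top bit clear, and internalized
// strings additionally have the internalized bit set.
const uint8_t kIsNotStringMask = 0x80;
const uint8_t kStringTag = 0x00;
const uint8_t kIsInternalizedMask = 0x40;
const uint8_t kInternalizedTag = 0x40;

// One mask plus one compare replaces a string check and a bit test.
static bool IsInternalizedString(uint8_t instance_type) {
  return (instance_type & (kIsNotStringMask | kIsInternalizedMask)) ==
         (kInternalizedTag | kStringTag);
}
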
 
 
@@ -3717,51 +3716,12 @@
 }
 
 
-static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
-  // Cache the called function in a global property cell.  Cache states
-  // are uninitialized, monomorphic (indicated by a JSFunction), and
-  // megamorphic.
-  // rbx : cache cell for call target
-  // rdi : the function to call
-  Isolate* isolate = masm->isolate();
-  Label initialize, done;
-
-  // Load the cache state into rcx.
-  __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
-
-  // A monomorphic cache hit or an already megamorphic state: invoke the
-  // function without changing the state.
-  __ cmpq(rcx, rdi);
-  __ j(equal, &done, Label::kNear);
-  __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
-  __ j(equal, &done, Label::kNear);
-
-  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
-  // megamorphic.
-  __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
-  __ j(equal, &initialize, Label::kNear);
-  // MegamorphicSentinel is an immortal immovable object (undefined) so no
-  // write-barrier is needed.
-  __ Move(FieldOperand(rbx, Cell::kValueOffset),
-          TypeFeedbackCells::MegamorphicSentinel(isolate));
-  __ jmp(&done, Label::kNear);
-
-  // An uninitialized cache is patched with the function.
-  __ bind(&initialize);
-  __ movq(FieldOperand(rbx, Cell::kValueOffset), rdi);
-  // No need for a write barrier here - cells are rescanned.
-
-  __ bind(&done);
-}
-
-
 static void GenerateRecordCallTarget(MacroAssembler* masm) {
   // Cache the called function in a global property cell.  Cache states
   // are uninitialized, monomorphic (indicated by a JSFunction), and
   // megamorphic.
   // rbx : cache cell for call target
   // rdi : the function to call
-  ASSERT(FLAG_optimize_constructed_arrays);
   Isolate* isolate = masm->isolate();
   Label initialize, done, miss, megamorphic, not_array_function;
 
@@ -3778,12 +3738,15 @@
   // Special handling of the Array() function, which caches not only the
   // monomorphic Array function but the initial ElementsKind with special
   // sentinels.
-  Handle<Object> terminal_kind_sentinel =
-      TypeFeedbackCells::MonomorphicArraySentinel(isolate,
-                                                  LAST_FAST_ELEMENTS_KIND);
   __ JumpIfNotSmi(rcx, &miss);
-  __ Cmp(rcx, terminal_kind_sentinel);
-  __ j(above, &miss);
+  if (FLAG_debug_code) {
+    Handle<Object> terminal_kind_sentinel =
+        TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
+                                                    LAST_FAST_ELEMENTS_KIND);
+    __ Cmp(rcx, terminal_kind_sentinel);
+    __ Assert(less_equal, "Array function sentinel is not an ElementsKind");
+  }
+
   // Make sure the function is the Array() function
   __ LoadArrayFunction(rcx);
   __ cmpq(rdi, rcx);
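
GenerateRecordCallTarget implements a small inline-cache state machine on the type feedback cell: uninitialized, then monomorphic on the first callee, then megamorphic once a second distinct callee shows up (with the Array function getting special ElementsKind sentinels). The transitions, minus the Array special case, in plain C++ (the sentinel values are stand-ins for V8's hole/undefined sentinels):

#include <cstdint>

const uintptr_t kUninitializedSentinel = 0;  // stand-in for "the hole"
const uintptr_t kMegamorphicSentinel = 1;    // stand-in for "undefined"

static void RecordCallTarget(uintptr_t* cell, uintptr_t callee) {
  uintptr_t state = *cell;
  // Monomorphic hit or already megamorphic: nothing to update.
  if (state == callee || state == kMegamorphicSentinel) return;
  // First callee seen: go monomorphic.
  if (state == kUninitializedSentinel) {
    *cell = callee;
    return;
  }
  // A different callee than the cached one: give up, go megamorphic.
  *cell = kMegamorphicSentinel;
}
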
@@ -3860,11 +3823,7 @@
   __ j(not_equal, &slow);
 
   if (RecordCallTarget()) {
-    if (FLAG_optimize_constructed_arrays) {
-      GenerateRecordCallTarget(masm);
-    } else {
-      GenerateRecordCallTargetNoArray(masm);
-    }
+    GenerateRecordCallTarget(masm);
   }
 
   // Fast-case: Just invoke the function.
@@ -3939,15 +3898,11 @@
   __ j(not_equal, &slow);
 
   if (RecordCallTarget()) {
-    if (FLAG_optimize_constructed_arrays) {
-      GenerateRecordCallTarget(masm);
-    } else {
-      GenerateRecordCallTargetNoArray(masm);
-    }
+    GenerateRecordCallTarget(masm);
   }
 
   // Jump to the function-specific construct stub.
-  Register jmp_reg = FLAG_optimize_constructed_arrays ? rcx : rbx;
+  Register jmp_reg = rcx;
   __ movq(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
   __ movq(jmp_reg, FieldOperand(jmp_reg,
                                 SharedFunctionInfo::kConstructStubOffset));
@@ -3995,9 +3950,7 @@
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
   // It is important that the store buffer overflow stubs are generated first.
   RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
-  if (FLAG_optimize_constructed_arrays) {
-    ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
-  }
+  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
 }
 
 
@@ -4179,6 +4132,8 @@
   // this by performing a garbage collection and retrying the
   // builtin once.
 
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
   // Enter the exit frame that transitions from JavaScript to C++.
 #ifdef _WIN64
   int arg_stack_space = (result_size_ < 2 ? 2 : 4);
@@ -4259,6 +4214,8 @@
   Label invoke, handler_entry, exit;
   Label not_outermost_js, not_outermost_js_2;
 
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
   {  // NOLINT. Scope block confuses linter.
     MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
     // Set up frame.
@@ -5894,9 +5851,13 @@
   __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
   __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
   STATIC_ASSERT(kInternalizedTag != 0);
-  __ and_(tmp1, tmp2);
-  __ testb(tmp1, Immediate(kIsInternalizedMask));
-  __ j(zero, &miss, Label::kNear);
+  __ and_(tmp1, Immediate(kIsNotStringMask | kIsInternalizedMask));
+  __ cmpb(tmp1, Immediate(kInternalizedTag | kStringTag));
+  __ j(not_equal, &miss, Label::kNear);
+
+  __ and_(tmp2, Immediate(kIsNotStringMask | kIsInternalizedMask));
+  __ cmpb(tmp2, Immediate(kInternalizedTag | kStringTag));
+  __ j(not_equal, &miss, Label::kNear);
 
   // Internalized strings are compared by identity.
   Label done;
@@ -5939,19 +5900,8 @@
   __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
   __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
 
-  Label succeed1;
-  __ testb(tmp1, Immediate(kIsInternalizedMask));
-  __ j(not_zero, &succeed1, Label::kNear);
-  __ cmpb(tmp1, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
-  __ j(not_equal, &miss, Label::kNear);
-  __ bind(&succeed1);
-
-  Label succeed2;
-  __ testb(tmp2, Immediate(kIsInternalizedMask));
-  __ j(not_zero, &succeed2, Label::kNear);
-  __ cmpb(tmp2, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
-  __ j(not_equal, &miss, Label::kNear);
-  __ bind(&succeed2);
+  __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
+  __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
 
   // Unique names are compared by identity.
   Label done;
@@ -6013,7 +5963,8 @@
   __ bind(&not_same);
 
   // Check that both strings are internalized strings. If they are, we're done
-  // because we already know they are not identical.
+  // because we already know they are not identical. We also know they are both
+  // strings.
   if (equality) {
     Label do_compare;
     STATIC_ASSERT(kInternalizedTag != 0);
@@ -6169,13 +6120,8 @@
 
     // Check if the entry name is not a unique name.
     __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
-    __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
-             Immediate(kIsInternalizedMask));
-    __ j(not_zero, &good, Label::kNear);
-    __ cmpb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
-            Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
-    __ j(not_equal, miss);
-
+    __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
+                           miss);
     __ bind(&good);
   }
 
@@ -6301,15 +6247,9 @@
       // key we are looking for.
 
       // Check if the entry name is not a unique name.
-      Label cont;
       __ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
-      __ testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
-               Immediate(kIsInternalizedMask));
-      __ j(not_zero, &cont);
-      __ cmpb(FieldOperand(scratch, Map::kInstanceTypeOffset),
-              Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
-      __ j(not_equal, &maybe_in_dictionary);
-      __ bind(&cont);
+      __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
+                             &maybe_in_dictionary);
     }
   }
 
@@ -6730,7 +6670,11 @@
 
 
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
-  if (entry_hook_ != NULL) {
+  if (masm->isolate()->function_entry_hook() != NULL) {
+    // It's always safe to call the entry hook stub, as the hook itself
+    // is not allowed to call back to V8.
+    AllowStubCallsScope allow_stub_calls(masm, true);
+
     ProfileEntryHookStub stub;
     masm->CallStub(&stub);
   }
@@ -6738,45 +6682,25 @@
 
 
 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
-  // Save volatile registers.
-  // Live registers at this point are the same as at the start of any
-  // JS function:
-  //   o rdi: the JS function object being called (i.e. ourselves)
-  //   o rsi: our context
-  //   o rbp: our caller's frame pointer
-  //   o rsp: stack pointer (pointing to return address)
-  //   o rcx: rcx is zero for method calls and non-zero for function calls.
-#ifdef _WIN64
-  const int kNumSavedRegisters = 1;
-
-  __ push(rcx);
-#else
-  const int kNumSavedRegisters = 3;
-
-  __ push(rcx);
-  __ push(rdi);
-  __ push(rsi);
-#endif
+  // This stub can be called from essentially anywhere, so it needs to save
+  // all volatile and callee-save registers.
+  const size_t kNumSavedRegisters = 2;
+  __ push(arg_reg_1);
+  __ push(arg_reg_2);
 
   // Calculate the original stack pointer and store it in the second arg.
-#ifdef _WIN64
-  __ lea(rdx, Operand(rsp, (kNumSavedRegisters + 1) * kPointerSize));
-#else
-  __ lea(rsi, Operand(rsp, (kNumSavedRegisters + 1) * kPointerSize));
-#endif
+  __ lea(arg_reg_2, Operand(rsp, (kNumSavedRegisters + 1) * kPointerSize));
 
   // Calculate the function address to the first arg.
-#ifdef _WIN64
-  __ movq(rcx, Operand(rsp, kNumSavedRegisters * kPointerSize));
-  __ subq(rcx, Immediate(Assembler::kShortCallInstructionLength));
-#else
-  __ movq(rdi, Operand(rsp, kNumSavedRegisters * kPointerSize));
-  __ subq(rdi, Immediate(Assembler::kShortCallInstructionLength));
-#endif
+  __ movq(arg_reg_1, Operand(rsp, kNumSavedRegisters * kPointerSize));
+  __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+
+  // Save the remainder of the volatile registers.
+  masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
 
   // Call the entry hook function.
-  __ movq(rax, &entry_hook_, RelocInfo::NONE64);
-  __ movq(rax, Operand(rax, 0));
+  __ movq(rax, FUNCTION_ADDR(masm->isolate()->function_entry_hook()),
+          RelocInfo::NONE64);
 
   AllowExternalCallThatCantCauseGC scope(masm);
 
@@ -6785,13 +6709,9 @@
   __ CallCFunction(rax, kArgumentCount);
 
   // Restore volatile regs.
-#ifdef _WIN64
-  __ pop(rcx);
-#else
-  __ pop(rsi);
-  __ pop(rdi);
-  __ pop(rcx);
-#endif
+  masm->PopCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
+  __ pop(arg_reg_2);
+  __ pop(arg_reg_1);
 
   __ Ret();
 }
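
The stub locates the instrumented function without any side table: the entry-hook call is the very first instruction of the generated code, so the function's entry address is the stub's own return address minus the length of that call. The arithmetic, assuming the 5-byte x64 call rel32 encoding behind Assembler::kShortCallInstructionLength:

#include <cstdint>

// return_address points just past the "call entry-hook stub" instruction
// that begins the function, so backing up by the call's length yields the
// function entry itself (this becomes arg_reg_1 above).
static uintptr_t FunctionEntryFromReturnAddress(uintptr_t return_address) {
  const uintptr_t kShortCallInstructionLength = 5;  // E8 + rel32
  return return_address - kShortCallInstructionLength;
}
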
@@ -6850,6 +6770,10 @@
   __ Cmp(rbx, undefined_sentinel);
   __ j(equal, &normal_sequence);
 
+  // The type cell may have gone megamorphic; don't overwrite it if so.
+  __ movq(rcx, FieldOperand(rbx, kPointerSize));
+  __ JumpIfNotSmi(rcx, &normal_sequence);
+
   // Save the resulting elements kind in type info
   __ Integer32ToSmi(rdx, rdx);
   __ movq(FieldOperand(rbx, kPointerSize), rdx);
@@ -6882,7 +6806,7 @@
     T stub(kind);
     stub.GetCode(isolate)->set_is_pregenerated(true);
     if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
-      T stub1(kind, true);
+      T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
       stub1.GetCode(isolate)->set_is_pregenerated(true);
     }
   }
@@ -6949,52 +6873,39 @@
     __ bind(&okay_here);
   }
 
-  if (FLAG_optimize_constructed_arrays) {
-    Label no_info, switch_ready;
-    // Get the elements kind and case on that.
-    __ Cmp(rbx, undefined_sentinel);
-    __ j(equal, &no_info);
-    __ movq(rdx, FieldOperand(rbx, Cell::kValueOffset));
-    __ JumpIfNotSmi(rdx, &no_info);
-    __ SmiToInteger32(rdx, rdx);
-    __ jmp(&switch_ready);
-    __ bind(&no_info);
-    __ movq(rdx, Immediate(GetInitialFastElementsKind()));
-    __ bind(&switch_ready);
+  Label no_info, switch_ready;
+  // Get the elements kind and case on that.
+  __ Cmp(rbx, undefined_sentinel);
+  __ j(equal, &no_info);
+  __ movq(rdx, FieldOperand(rbx, Cell::kValueOffset));
+  __ JumpIfNotSmi(rdx, &no_info);
+  __ SmiToInteger32(rdx, rdx);
+  __ jmp(&switch_ready);
+  __ bind(&no_info);
+  __ movq(rdx, Immediate(GetInitialFastElementsKind()));
+  __ bind(&switch_ready);
 
-    if (argument_count_ == ANY) {
-      Label not_zero_case, not_one_case;
-      __ testq(rax, rax);
-      __ j(not_zero, &not_zero_case);
-      CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+  if (argument_count_ == ANY) {
+    Label not_zero_case, not_one_case;
+    __ testq(rax, rax);
+    __ j(not_zero, &not_zero_case);
+    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
 
-      __ bind(&not_zero_case);
-      __ cmpl(rax, Immediate(1));
-      __ j(greater, &not_one_case);
-      CreateArrayDispatchOneArgument(masm);
+    __ bind(&not_zero_case);
+    __ cmpl(rax, Immediate(1));
+    __ j(greater, &not_one_case);
+    CreateArrayDispatchOneArgument(masm);
 
-      __ bind(&not_one_case);
-      CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
-    } else if (argument_count_ == NONE) {
-      CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
-    } else if (argument_count_ == ONE) {
-      CreateArrayDispatchOneArgument(masm);
-    } else if (argument_count_ == MORE_THAN_ONE) {
-      CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
-    } else {
-      UNREACHABLE();
-    }
+    __ bind(&not_one_case);
+    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
+  } else if (argument_count_ == NONE) {
+    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+  } else if (argument_count_ == ONE) {
+    CreateArrayDispatchOneArgument(masm);
+  } else if (argument_count_ == MORE_THAN_ONE) {
+    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
   } else {
-    Label generic_constructor;
-    // Run the native code for the Array function called as constructor.
-    ArrayNativeCode(masm, &generic_constructor);
-
-    // Jump to the generic construct code in case the specialized code cannot
-    // handle the construction.
-    __ bind(&generic_constructor);
-    Handle<Code> generic_construct_stub =
-        masm->isolate()->builtins()->JSConstructStubGeneric();
-    __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+    UNREACHABLE();
   }
 }
 
@@ -7058,46 +6969,33 @@
     __ Check(equal, "Unexpected initial map for Array function");
   }
 
-  if (FLAG_optimize_constructed_arrays) {
-    // Figure out the right elements kind
-    __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+  // Figure out the right elements kind
+  __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
 
-    // Load the map's "bit field 2" into |result|. We only need the first byte,
-    // but the following masking takes care of that anyway.
-    __ movzxbq(rcx, FieldOperand(rcx, Map::kBitField2Offset));
-    // Retrieve elements_kind from bit field 2.
-    __ and_(rcx, Immediate(Map::kElementsKindMask));
-    __ shr(rcx, Immediate(Map::kElementsKindShift));
+  // Load the map's "bit field 2" into rcx. We only need the first byte,
+  // but the following masking takes care of that anyway.
+  __ movzxbq(rcx, FieldOperand(rcx, Map::kBitField2Offset));
+  // Retrieve elements_kind from bit field 2.
+  __ and_(rcx, Immediate(Map::kElementsKindMask));
+  __ shr(rcx, Immediate(Map::kElementsKindShift));
 
-    if (FLAG_debug_code) {
-      Label done;
-      __ cmpl(rcx, Immediate(FAST_ELEMENTS));
-      __ j(equal, &done);
-      __ cmpl(rcx, Immediate(FAST_HOLEY_ELEMENTS));
-      __ Assert(equal,
-          "Invalid ElementsKind for InternalArray or InternalPackedArray");
-      __ bind(&done);
-    }
-
-    Label fast_elements_case;
+  if (FLAG_debug_code) {
+    Label done;
     __ cmpl(rcx, Immediate(FAST_ELEMENTS));
-    __ j(equal, &fast_elements_case);
-    GenerateCase(masm, FAST_HOLEY_ELEMENTS);
-
-    __ bind(&fast_elements_case);
-    GenerateCase(masm, FAST_ELEMENTS);
-  } else {
-    Label generic_constructor;
-    // Run the native code for the Array function called as constructor.
-    ArrayNativeCode(masm, &generic_constructor);
-
-    // Jump to the generic construct code in case the specialized code cannot
-    // handle the construction.
-    __ bind(&generic_constructor);
-    Handle<Code> generic_construct_stub =
-        masm->isolate()->builtins()->JSConstructStubGeneric();
-    __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+    __ j(equal, &done);
+    __ cmpl(rcx, Immediate(FAST_HOLEY_ELEMENTS));
+    __ Assert(equal,
+              "Invalid ElementsKind for InternalArray or InternalPackedArray");
+    __ bind(&done);
   }
+
+  Label fast_elements_case;
+  __ cmpl(rcx, Immediate(FAST_ELEMENTS));
+  __ j(equal, &fast_elements_case);
+  GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+  __ bind(&fast_elements_case);
+  GenerateCase(masm, FAST_ELEMENTS);
 }
 
 
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 2ac56a1..7a7ec7b 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -346,7 +346,7 @@
 
   // Allocate new backing store.
   __ bind(&new_backing_store);
-  __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
+  __ lea(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
   __ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
   // Set backing store's map
   __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
@@ -381,7 +381,7 @@
   // Conversion loop.
   __ bind(&loop);
   __ movq(rbx,
-          FieldOperand(r8, r9, times_8, FixedArray::kHeaderSize));
+          FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
   // r9 : current element's index
   // rbx: current element (smi-tagged)
   __ JumpIfNotSmi(rbx, &convert_hole);
@@ -459,7 +459,7 @@
   __ bind(&loop);
   __ movq(r14, FieldOperand(r8,
                             r9,
-                            times_pointer_size,
+                            times_8,
                             FixedDoubleArray::kHeaderSize));
   // r9 : current element's index
   // r14: current element
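
The times_8/times_pointer_size swaps above are not cosmetic: FixedArray slots are pointer-sized, while FixedDoubleArray slots are always 8 bytes (an unboxed double), and the two scales only happen to coincide on x64. Spelled out as offset arithmetic (hypothetical helpers):

#include <cstddef>

// times_pointer_size: tagged slots scale with the pointer width.
static size_t TaggedSlotOffset(size_t header_size, size_t index) {
  return header_size + index * sizeof(void*);
}

// times_8: double slots are 8 bytes regardless of pointer width.
static size_t DoubleSlotOffset(size_t header_size, size_t index) {
  return header_size + index * sizeof(double);
}
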
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 21682c2..60e676a 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -548,7 +548,7 @@
   // last FrameDescription**.
   __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
   __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
-  __ lea(rdx, Operand(rax, rdx, times_8, 0));
+  __ lea(rdx, Operand(rax, rdx, times_pointer_size, 0));
   __ jmp(&outer_loop_header);
   __ bind(&outer_push_loop);
   // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
diff --git a/src/x64/frames-x64.cc b/src/x64/frames-x64.cc
index a811a34..21cb79c 100644
--- a/src/x64/frames-x64.cc
+++ b/src/x64/frames-x64.cc
@@ -32,17 +32,12 @@
 #include "assembler.h"
 #include "assembler-x64.h"
 #include "assembler-x64-inl.h"
-#include "frames-inl.h"
+#include "frames.h"
 
 namespace v8 {
 namespace internal {
 
 
-Address ExitFrame::ComputeStackPointer(Address fp) {
-  return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
-
-
 Register JavaScriptFrame::fp_register() { return rbp; }
 Register JavaScriptFrame::context_register() { return rsi; }
 
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index e5d190a..7732a1d 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1663,10 +1663,10 @@
         ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
         // Fall through.
       case ObjectLiteral::Property::COMPUTED:
-        if (key->handle()->IsInternalizedString()) {
+        if (key->value()->IsInternalizedString()) {
           if (property->emit_store()) {
             VisitForAccumulatorValue(value);
-            __ Move(rcx, key->handle());
+            __ Move(rcx, key->value());
             __ movq(rdx, Operand(rsp, 0));
             Handle<Code> ic = is_classic_mode()
                 ? isolate()->builtins()->StoreIC_Initialize()
@@ -1806,10 +1806,7 @@
     Expression* subexpr = subexprs->at(i);
     // If the subexpression is a literal or a simple materialized literal, it
     // is already set in the cloned array.
-    if (subexpr->AsLiteral() != NULL ||
-        CompileTimeValue::IsCompileTimeValue(subexpr)) {
-      continue;
-    }
+    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
 
     if (!result_saved) {
       __ push(rax);  // array literal
@@ -1976,14 +1973,31 @@
       __ push(result_register());
       // Fall through.
     case Yield::INITIAL: {
-      VisitForStackValue(expr->generator_object());
+      Label suspend, continuation, post_runtime, resume;
+
+      __ jmp(&suspend);
+
+      __ bind(&continuation);
+      __ jmp(&resume);
+
+      __ bind(&suspend);
+      VisitForAccumulatorValue(expr->generator_object());
+      ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+      __ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
+              Smi::FromInt(continuation.pos()));
+      __ movq(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
+      __ movq(rcx, rsi);
+      __ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
+                          kDontSaveFPRegs);
+      __ lea(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
+      __ cmpq(rsp, rbx);
+      __ j(equal, &post_runtime);
+      __ push(rax);  // generator object
       __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ movq(context_register(),
               Operand(rbp, StandardFrameConstants::kContextOffset));
+      __ bind(&post_runtime);
 
-      Label resume;
-      __ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex);
-      __ j(not_equal, &resume);
       __ pop(result_register());
       EmitReturnSequence();
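
The suspend path above persists everything resumption needs directly into the JSGeneratorObject: a resume point (the code offset taken from continuation.pos()) and the current context, with a write barrier for the context store. A rough C++ analogue of the record-offset-then-return protocol (entirely hypothetical, switch-based rather than code-offset-based):

struct Generator {
  int continuation;  // "where to resume", like kContinuationOffset
  int state;         // stands in for the saved context/operand state
  Generator() : continuation(0), state(0) {}
};

// Returns true and produces a value while suspended; false when done.
static bool Next(Generator* gen, int* out) {
  switch (gen->continuation) {
    case 0:                   // initial entry
      gen->state = 42;
      *out = gen->state;
      gen->continuation = 1;  // record the resume point, then suspend
      return true;
    case 1:                   // resumed at the recorded point
      gen->continuation = 2;  // no more values
      return false;
    default:
      return false;
  }
}
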
 
@@ -2011,7 +2025,8 @@
       // [sp + 1 * kPointerSize] iter
       // [sp + 0 * kPointerSize] g
 
-      Label l_catch, l_try, l_resume, l_next, l_call, l_loop;
+      Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+      Label l_next, l_call, l_loop;
       // Initial send value is undefined.
       __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
       __ jmp(&l_next);
@@ -2033,12 +2048,23 @@
       __ PushTryHandler(StackHandler::CATCH, expr->index());
       const int handler_size = StackHandlerConstants::kSize;
       __ push(rax);                                      // result
-      __ push(Operand(rsp, (0 + 1) * kPointerSize + handler_size));  // g
+      __ jmp(&l_suspend);
+      __ bind(&l_continuation);
+      __ jmp(&l_resume);
+      __ bind(&l_suspend);
+      const int generator_object_depth = kPointerSize + handler_size;
+      __ movq(rax, Operand(rsp, generator_object_depth));
+      __ push(rax);                                      // g
+      ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+      __ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
+              Smi::FromInt(l_continuation.pos()));
+      __ movq(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
+      __ movq(rcx, rsi);
+      __ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
+                          kDontSaveFPRegs);
       __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ movq(context_register(),
               Operand(rbp, StandardFrameConstants::kContextOffset));
-      __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
-      __ j(not_equal, &l_resume);
       __ pop(rax);                                       // result
       EmitReturnSequence();
       __ bind(&l_resume);                                // received in rax
@@ -2218,7 +2244,7 @@
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
-  __ Move(rcx, key->handle());
+  __ Move(rcx, key->value());
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
   CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
 }
@@ -2337,7 +2363,7 @@
       VisitForAccumulatorValue(prop->obj());
       __ movq(rdx, rax);
       __ pop(rax);  // Restore value.
-      __ Move(rcx, prop->key()->AsLiteral()->handle());
+      __ Move(rcx, prop->key()->AsLiteral()->value());
       Handle<Code> ic = is_classic_mode()
           ? isolate()->builtins()->StoreIC_Initialize()
           : isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2460,7 +2486,7 @@
 
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
-  __ Move(rcx, prop->key()->AsLiteral()->handle());
+  __ Move(rcx, prop->key()->AsLiteral()->value());
   __ pop(rdx);
   Handle<Code> ic = is_classic_mode()
       ? isolate()->builtins()->StoreIC_Initialize()
@@ -2720,7 +2746,7 @@
     }
     if (property->key()->IsPropertyName()) {
       EmitCallWithIC(expr,
-                     property->key()->AsLiteral()->handle(),
+                     property->key()->AsLiteral()->value(),
                      RelocInfo::CODE_TARGET);
     } else {
       EmitKeyedCallWithIC(expr, property->key());
@@ -3325,7 +3351,7 @@
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   ASSERT_NE(NULL, args->at(1)->AsLiteral());
-  Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+  Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
 
@@ -3746,7 +3772,7 @@
   ASSERT_EQ(2, args->length());
 
   ASSERT_NE(NULL, args->at(0)->AsLiteral());
-  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
 
   Handle<FixedArray> jsfunction_result_caches(
       isolate()->native_context()->jsfunction_result_caches());
@@ -4484,7 +4510,7 @@
       }
       break;
     case NAMED_PROPERTY: {
-      __ Move(rcx, prop->key()->AsLiteral()->handle());
+      __ Move(rcx, prop->key()->AsLiteral()->value());
       __ pop(rdx);
       Handle<Code> ic = is_classic_mode()
           ? isolate()->builtins()->StoreIC_Initialize()
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index ce91eff..82112a0 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -337,7 +337,8 @@
   __ testl(hash, Immediate(Name::kContainsCachedArrayIndexMask));
   __ j(zero, index_string);  // The value in hash is used at jump target.
 
-  // Is the string internalized?
+  // Is the string internalized? We already know it's a string, so a single
+  // bit test is enough.
   STATIC_ASSERT(kInternalizedTag != 0);
   __ testb(FieldOperand(map, Map::kInstanceTypeOffset),
            Immediate(kIsInternalizedMask));
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index beaa1f1..f3045d5 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -67,7 +67,7 @@
 #define __ masm()->
 
 bool LCodeGen::GenerateCode() {
-  HPhase phase("Z_Code generation", chunk());
+  LPhase phase("Z_Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;
 
@@ -92,7 +92,7 @@
     RegisterDependentCodeForEmbeddedMaps(code);
   }
   PopulateDeoptimizationData(code);
-  info()->CommitDependentMaps(code);
+  info()->CommitDependencies(code);
 }
 
 
@@ -511,27 +511,6 @@
 
   for (int i = 0; i < translation_size; ++i) {
     LOperand* value = environment->values()->at(i);
-    // spilled_registers_ and spilled_double_registers_ are either
-    // both NULL or both set.
-    if (environment->spilled_registers() != NULL && value != NULL) {
-      if (value->IsRegister() &&
-          environment->spilled_registers()[value->index()] != NULL) {
-        translation->MarkDuplicate();
-        AddToTranslation(translation,
-                         environment->spilled_registers()[value->index()],
-                         environment->HasTaggedValueAt(i),
-                         environment->HasUint32ValueAt(i));
-      } else if (
-          value->IsDoubleRegister() &&
-          environment->spilled_double_registers()[value->index()] != NULL) {
-        translation->MarkDuplicate();
-        AddToTranslation(
-            translation,
-            environment->spilled_double_registers()[value->index()],
-            false,
-            false);
-      }
-    }
 
     // TODO(mstarzinger): Introduce marker operands to indicate that this value
     // is not present and must be reconstructed from the deoptimizer. Currently
@@ -541,12 +520,6 @@
       translation->BeginArgumentsObject(arguments_count);
       for (int i = 0; i < arguments_count; ++i) {
         LOperand* value = environment->values()->at(translation_size + i);
-        ASSERT(environment->spilled_registers() == NULL ||
-               !value->IsRegister() ||
-               environment->spilled_registers()[value->index()] == NULL);
-        ASSERT(environment->spilled_registers() == NULL ||
-               !value->IsDoubleRegister() ||
-               environment->spilled_double_registers()[value->index()] == NULL);
         AddToTranslation(translation,
                          value,
                          environment->HasTaggedValueAt(translation_size + i),
@@ -1000,7 +973,8 @@
 
 
 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
-  // Nothing to do.
+  // Record the address of the first unknown OSR value as the place to enter.
+  if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
 }
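
DoUnknownOSRValue now participates in picking the OSR entry point: the first such value emitted records the offset, and later candidates are ignored. The record-once idiom, isolated (names hypothetical):

    #include <cassert>

    int osr_pc_offset = -1;  // -1 means "not recorded yet"

    void MaybeRecordOsrEntry(int current_pc_offset) {
      if (osr_pc_offset == -1) osr_pc_offset = current_pc_offset;
    }

    int main() {
      MaybeRecordOsrEntry(100);
      MaybeRecordOsrEntry(200);  // ignored; the first offset wins
      assert(osr_pc_offset == 100);
      return 0;
    }
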
 
 
@@ -1034,12 +1008,12 @@
     __ andl(left_reg, Immediate(divisor - 1));
     __ bind(&done);
 
-  } else if (hmod->has_fixed_right_arg()) {
+  } else if (hmod->fixed_right_arg().has_value) {
     Register left_reg = ToRegister(instr->left());
     ASSERT(left_reg.is(ToRegister(instr->result())));
     Register right_reg = ToRegister(instr->right());
 
-    int32_t divisor = hmod->fixed_right_arg_value();
+    int32_t divisor = hmod->fixed_right_arg().value;
     ASSERT(IsPowerOf2(divisor));
 
     // Check if our assumption of a fixed right operand still holds.
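
has_fixed_right_arg()/fixed_right_arg_value() collapse into a single Maybe-style accessor. A sketch of that shape (struct and field names assumed from the call sites above):

    #include <cassert>
    #include <cstdint>

    // A value that may or may not be present, queried through one object.
    template <typename T>
    struct Maybe {
      bool has_value;
      T value;
    };

    int32_t DivisorOrFallback(const Maybe<int32_t>& fixed_right_arg,
                              int32_t fallback) {
      return fixed_right_arg.has_value ? fixed_right_arg.value : fallback;
    }

    int main() {
      Maybe<int32_t> some = { true, 8 };
      Maybe<int32_t> none = { false, 0 };
      assert(DivisorOrFallback(some, 1) == 8);
      assert(DivisorOrFallback(none, 1) == 1);
      return 0;
    }
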
@@ -1612,8 +1586,11 @@
   Register result = ToRegister(instr->result());
   ASSERT(input.is(result));
   Label done;
-  // If the object is a smi return the object.
-  __ JumpIfSmi(input, &done, Label::kNear);
+
+  if (!instr->hydrogen()->value()->IsHeapObject()) {
+    // If the object is a smi return the object.
+    __ JumpIfSmi(input, &done, Label::kNear);
+  }
 
   // If the object is not a value type, return the object.
   __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
@@ -1862,10 +1839,12 @@
 }
 
 
-void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
+  int right_block = instr->FalseDestination(chunk_);
+  int left_block = instr->TrueDestination(chunk_);
+
   int next_block = GetNextEmittedBlock();
-  right_block = chunk_->LookupDestination(right_block);
-  left_block = chunk_->LookupDestination(left_block);
 
   if (right_block == left_block) {
     EmitGoto(left_block);
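
EmitBranch is now templated on the instruction, which supplies its own true and false destinations; callers stop threading raw block ids around. A compilable toy model of the new calling convention:

    #include <cstdio>

    struct Chunk {
      int LookupDestination(int block_id) const { return block_id; }
    };

    struct ToyBranch {
      int true_id;
      int false_id;
      int TrueDestination(const Chunk& c) const {
        return c.LookupDestination(true_id);
      }
      int FalseDestination(const Chunk& c) const {
        return c.LookupDestination(false_id);
      }
    };

    // The emitter asks the instruction for its destinations.
    template <class InstrType>
    void EmitBranch(const Chunk& chunk, const InstrType& instr,
                    bool condition) {
      int target = condition ? instr.TrueDestination(chunk)
                             : instr.FalseDestination(chunk);
      std::printf("jump to block %d\n", target);
    }

    int main() {
      Chunk chunk;
      ToyBranch branch = { 1, 2 };
      EmitBranch(chunk, branch, true);   // block 1
      EmitBranch(chunk, branch, false);  // block 2
      return 0;
    }
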
@@ -1888,26 +1867,23 @@
 
 
 void LCodeGen::DoBranch(LBranch* instr) {
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
   Representation r = instr->hydrogen()->value()->representation();
   if (r.IsInteger32()) {
     ASSERT(!info()->IsStub());
     Register reg = ToRegister(instr->value());
     __ testl(reg, reg);
-    EmitBranch(true_block, false_block, not_zero);
+    EmitBranch(instr, not_zero);
   } else if (r.IsSmi()) {
     ASSERT(!info()->IsStub());
     Register reg = ToRegister(instr->value());
     __ testq(reg, reg);
-    EmitBranch(true_block, false_block, not_zero);
+    EmitBranch(instr, not_zero);
   } else if (r.IsDouble()) {
     ASSERT(!info()->IsStub());
     XMMRegister reg = ToDoubleRegister(instr->value());
     __ xorps(xmm0, xmm0);
     __ ucomisd(reg, xmm0);
-    EmitBranch(true_block, false_block, not_equal);
+    EmitBranch(instr, not_equal);
   } else {
     ASSERT(r.IsTagged());
     Register reg = ToRegister(instr->value());
@@ -1915,43 +1891,52 @@
     if (type.IsBoolean()) {
       ASSERT(!info()->IsStub());
       __ CompareRoot(reg, Heap::kTrueValueRootIndex);
-      EmitBranch(true_block, false_block, equal);
+      EmitBranch(instr, equal);
     } else if (type.IsSmi()) {
       ASSERT(!info()->IsStub());
       __ SmiCompare(reg, Smi::FromInt(0));
-      EmitBranch(true_block, false_block, not_equal);
+      EmitBranch(instr, not_equal);
+    } else if (type.IsJSArray()) {
+      ASSERT(!info()->IsStub());
+      EmitBranch(instr, no_condition);
+    } else if (type.IsHeapNumber()) {
+      ASSERT(!info()->IsStub());
+      __ xorps(xmm0, xmm0);
+      __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+      EmitBranch(instr, not_equal);
+    } else if (type.IsString()) {
+      ASSERT(!info()->IsStub());
+      __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+      EmitBranch(instr, not_equal);
     } else {
-      Label* true_label = chunk_->GetAssemblyLabel(true_block);
-      Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
       ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
       // Avoid deopts in the case where we've never executed this path before.
-      if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
+      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
 
       if (expected.Contains(ToBooleanStub::UNDEFINED)) {
         // undefined -> false.
         __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
-        __ j(equal, false_label);
+        __ j(equal, instr->FalseLabel(chunk_));
       }
       if (expected.Contains(ToBooleanStub::BOOLEAN)) {
         // true -> true.
         __ CompareRoot(reg, Heap::kTrueValueRootIndex);
-        __ j(equal, true_label);
+        __ j(equal, instr->TrueLabel(chunk_));
         // false -> false.
         __ CompareRoot(reg, Heap::kFalseValueRootIndex);
-        __ j(equal, false_label);
+        __ j(equal, instr->FalseLabel(chunk_));
       }
       if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
         // 'null' -> false.
         __ CompareRoot(reg, Heap::kNullValueRootIndex);
-        __ j(equal, false_label);
+        __ j(equal, instr->FalseLabel(chunk_));
       }
 
       if (expected.Contains(ToBooleanStub::SMI)) {
         // Smis: 0 -> false, all other -> true.
         __ Cmp(reg, Smi::FromInt(0));
-        __ j(equal, false_label);
-        __ JumpIfSmi(reg, true_label);
+        __ j(equal, instr->FalseLabel(chunk_));
+        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
       } else if (expected.NeedsMap()) {
         // If we need a map later and have a Smi -> deopt.
         __ testb(reg, Immediate(kSmiTagMask));
@@ -1966,14 +1951,14 @@
           // Undetectable -> false.
           __ testb(FieldOperand(map, Map::kBitFieldOffset),
                    Immediate(1 << Map::kIsUndetectable));
-          __ j(not_zero, false_label);
+          __ j(not_zero, instr->FalseLabel(chunk_));
         }
       }
 
       if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
         // spec object -> true.
         __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
-        __ j(above_equal, true_label);
+        __ j(above_equal, instr->TrueLabel(chunk_));
       }
 
       if (expected.Contains(ToBooleanStub::STRING)) {
@@ -1982,8 +1967,8 @@
         __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
         __ j(above_equal, &not_string, Label::kNear);
         __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
-        __ j(not_zero, true_label);
-        __ jmp(false_label);
+        __ j(not_zero, instr->TrueLabel(chunk_));
+        __ jmp(instr->FalseLabel(chunk_));
         __ bind(&not_string);
       }
 
@@ -1994,13 +1979,16 @@
         __ j(not_equal, &not_heap_number, Label::kNear);
         __ xorps(xmm0, xmm0);
         __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
-        __ j(zero, false_label);
-        __ jmp(true_label);
+        __ j(zero, instr->FalseLabel(chunk_));
+        __ jmp(instr->TrueLabel(chunk_));
         __ bind(&not_heap_number);
       }
 
-      // We've seen something for the first time -> deopt.
-      DeoptimizeIf(no_condition, instr->environment());
+      if (!expected.IsGeneric()) {
+        // We've seen something for the first time -> deopt.
+        // This can only happen if we are not generic already.
+        DeoptimizeIf(no_condition, instr->environment());
+      }
     }
   }
 }
@@ -2049,24 +2037,21 @@
 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
   LOperand* left = instr->left();
   LOperand* right = instr->right();
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
   Condition cc = TokenToCondition(instr->op(), instr->is_double());
 
   if (left->IsConstantOperand() && right->IsConstantOperand()) {
     // We can statically evaluate the comparison.
     double left_val = ToDouble(LConstantOperand::cast(left));
     double right_val = ToDouble(LConstantOperand::cast(right));
-    int next_block =
-      EvalComparison(instr->op(), left_val, right_val) ? true_block
-                                                       : false_block;
+    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
     EmitGoto(next_block);
   } else {
     if (instr->is_double()) {
       // Don't base result on EFLAGS when a NaN is involved. Instead
       // jump to the false block.
       __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
-      __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+      __ j(parity_even, instr->FalseLabel(chunk_));
     } else {
       int32_t value;
       if (right->IsConstantOperand()) {
@@ -2105,15 +2090,13 @@
         }
       }
     }
-    EmitBranch(true_block, false_block, cc);
+    EmitBranch(instr, cc);
   }
 }
 
 
 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
   Register left = ToRegister(instr->left());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
 
   if (instr->right()->IsConstantOperand()) {
     Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
@@ -2122,17 +2105,15 @@
     Register right = ToRegister(instr->right());
     __ cmpq(left, right);
   }
-  EmitBranch(true_block, false_block, equal);
+  EmitBranch(instr, equal);
 }
 
 
 void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
   Register left = ToRegister(instr->left());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
 
   __ cmpq(left, Immediate(instr->hydrogen()->right()));
-  EmitBranch(true_block, false_block, equal);
+  EmitBranch(instr, equal);
 }
 
 
@@ -2164,21 +2145,21 @@
 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
   Register reg = ToRegister(instr->value());
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  Label* true_label = chunk_->GetAssemblyLabel(true_block);
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+  Condition true_cond = EmitIsObject(
+      reg, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
 
-  Condition true_cond = EmitIsObject(reg, false_label, true_label);
-
-  EmitBranch(true_block, false_block, true_cond);
+  EmitBranch(instr, true_cond);
 }
 
 
 Condition LCodeGen::EmitIsString(Register input,
                                  Register temp1,
-                                 Label* is_not_string) {
-  __ JumpIfSmi(input, is_not_string);
+                                 Label* is_not_string,
+                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
+  if (check_needed == INLINE_SMI_CHECK) {
+    __ JumpIfSmi(input, is_not_string);
+  }
+
   Condition cond =  masm_->IsObjectStringType(input, temp1, temp1);
 
   return cond;
@@ -2189,20 +2170,18 @@
   Register reg = ToRegister(instr->value());
   Register temp = ToRegister(instr->temp());
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+  SmiCheck check_needed =
+      instr->hydrogen()->value()->IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
 
-  Condition true_cond = EmitIsString(reg, temp, false_label);
+  Condition true_cond = EmitIsString(
+      reg, temp, instr->FalseLabel(chunk_), check_needed);
 
-  EmitBranch(true_block, false_block, true_cond);
+  EmitBranch(instr, true_cond);
 }
 
 
 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
   Condition is_smi;
   if (instr->value()->IsRegister()) {
     Register input = ToRegister(instr->value());
@@ -2211,7 +2190,7 @@
     Operand input = ToOperand(instr->value());
     is_smi = masm()->CheckSmi(input);
   }
-  EmitBranch(true_block, false_block, is_smi);
+  EmitBranch(instr, is_smi);
 }
 
 
@@ -2219,21 +2198,18 @@
   Register input = ToRegister(instr->value());
   Register temp = ToRegister(instr->temp());
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+  if (!instr->hydrogen()->value()->IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
   __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
   __ testb(FieldOperand(temp, Map::kBitFieldOffset),
            Immediate(1 << Map::kIsUndetectable));
-  EmitBranch(true_block, false_block, not_zero);
+  EmitBranch(instr, not_zero);
 }
 
 
 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
   Token::Value op = instr->op();
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
 
   Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -2241,7 +2217,7 @@
   Condition condition = TokenToCondition(op, false);
   __ testq(rax, rax);
 
-  EmitBranch(true_block, false_block, condition);
+  EmitBranch(instr, condition);
 }
 
 
@@ -2268,15 +2244,12 @@
 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
   Register input = ToRegister(instr->value());
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
-  __ JumpIfSmi(input, false_label);
+  if (!instr->hydrogen()->value()->IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
 
   __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
-  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
+  EmitBranch(instr, BranchCondition(instr->hydrogen()));
 }
 
 
@@ -2296,12 +2269,9 @@
     LHasCachedArrayIndexAndBranch* instr) {
   Register input = ToRegister(instr->value());
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
   __ testl(FieldOperand(input, String::kHashFieldOffset),
            Immediate(String::kContainsCachedArrayIndexMask));
-  EmitBranch(true_block, false_block, equal);
+  EmitBranch(instr, equal);
 }
 
 
@@ -2379,25 +2349,18 @@
   Register temp2 = ToRegister(instr->temp2());
   Handle<String> class_name = instr->hydrogen()->class_name();
 
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+      class_name, input, temp, temp2);
 
-  Label* true_label = chunk_->GetAssemblyLabel(true_block);
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
-  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
-  EmitBranch(true_block, false_block, equal);
+  EmitBranch(instr, equal);
 }
 
 
 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
   Register reg = ToRegister(instr->value());
-  int true_block = instr->true_block_id();
-  int false_block = instr->false_block_id();
 
   __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
-  EmitBranch(true_block, false_block, equal);
+  EmitBranch(instr, equal);
 }
 
 
@@ -2693,9 +2656,9 @@
   __ movq(target, value);
 
   if (instr->hydrogen()->NeedsWriteBarrier()) {
-    HType type = instr->hydrogen()->value()->type();
     SmiCheck check_needed =
-        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+      instr->hydrogen()->value()->IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     int offset = Context::SlotOffset(instr->slot_index());
     Register scratch = ToRegister(instr->temp());
     __ RecordWriteContextSlot(context,
@@ -3899,11 +3862,9 @@
   ASSERT(ToRegister(instr->result()).is(rax));
 
   __ Set(rax, instr->arity());
-  if (FLAG_optimize_constructed_arrays) {
-    // No cell in ebx for construct type feedback in optimized code
-    Handle<Object> undefined_value(isolate()->factory()->undefined_value());
-    __ Move(rbx, undefined_value);
-  }
+  // No cell in rbx for construct type feedback in optimized code.
+  Handle<Object> undefined_value(isolate()->factory()->undefined_value());
+  __ Move(rbx, undefined_value);
   CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
   CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
 }
@@ -3912,16 +3873,18 @@
 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   ASSERT(ToRegister(instr->constructor()).is(rdi));
   ASSERT(ToRegister(instr->result()).is(rax));
-  ASSERT(FLAG_optimize_constructed_arrays);
 
   __ Set(rax, instr->arity());
   __ Move(rbx, instr->hydrogen()->property_cell());
   ElementsKind kind = instr->hydrogen()->elements_kind();
-  bool disable_allocation_sites =
-      (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE);
+  AllocationSiteOverrideMode override_mode =
+      (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE)
+          ? DISABLE_ALLOCATION_SITES
+          : DONT_OVERRIDE;
+  ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
 
   if (instr->arity() == 0) {
-    ArrayNoArgumentConstructorStub stub(kind, disable_allocation_sites);
+    ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   } else if (instr->arity() == 1) {
     Label done;
@@ -3934,18 +3897,18 @@
       __ j(zero, &packed_case);
 
       ElementsKind holey_kind = GetHoleyElementsKind(kind);
-      ArraySingleArgumentConstructorStub stub(holey_kind,
-                                              disable_allocation_sites);
+      ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
+                                              override_mode);
       CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
       __ jmp(&done);
       __ bind(&packed_case);
     }
 
-    ArraySingleArgumentConstructorStub stub(kind, disable_allocation_sites);
+    ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
     __ bind(&done);
   } else {
-    ArrayNArgumentsConstructorStub stub(kind, disable_allocation_sites);
+    ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   }
 }
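
The bool disable_allocation_sites turns into the two-state AllocationSiteOverrideMode, so the stub constructors above read as intent rather than a bare true/false. Restated as a tiny helper (helper name hypothetical, enumerators from the hunk):

    enum AllocationSiteOverrideMode { DONT_OVERRIDE, DISABLE_ALLOCATION_SITES };

    AllocationSiteOverrideMode OverrideModeFor(bool tracks_allocation_site) {
      return tracks_allocation_site ? DISABLE_ALLOCATION_SITES : DONT_OVERRIDE;
    }

    int main() {
      return OverrideModeFor(false) == DONT_OVERRIDE ? 0 : 1;
    }
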
@@ -4021,9 +3984,9 @@
   }
 
   // Do the store.
-  HType type = instr->hydrogen()->value()->type();
   SmiCheck check_needed =
-      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+      instr->hydrogen()->value()->IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
 
   Register write_register = object;
   if (!access.IsInobject()) {
@@ -4262,9 +4225,9 @@
     ASSERT(instr->value()->IsRegister());
     Register value = ToRegister(instr->value());
     ASSERT(!instr->key()->IsConstantOperand());
-    HType type = instr->hydrogen()->value()->type();
     SmiCheck check_needed =
-        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+        instr->hydrogen()->value()->IsHeapObject()
+            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
     Register key_reg(ToRegister(key));
     __ lea(key_reg, operand);
@@ -4930,9 +4893,11 @@
 
 
 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
-  LOperand* input = instr->value();
-  Condition cc = masm()->CheckSmi(ToRegister(input));
-  DeoptimizeIf(cc, instr->environment());
+  if (!instr->hydrogen()->value()->IsHeapObject()) {
+    LOperand* input = instr->value();
+    Condition cc = masm()->CheckSmi(ToRegister(input));
+    DeoptimizeIf(cc, instr->environment());
+  }
 }
 
 
@@ -5084,6 +5049,94 @@
 }
 
 
+void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
+  class DeferredAllocateObject: public LDeferredCode {
+   public:
+    DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LAllocateObject* instr_;
+  };
+
+  DeferredAllocateObject* deferred =
+      new(zone()) DeferredAllocateObject(this, instr);
+
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->temp());
+  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+  Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
+  int instance_size = initial_map->instance_size();
+  ASSERT(initial_map->pre_allocated_property_fields() +
+         initial_map->unused_property_fields() -
+         initial_map->inobject_properties() == 0);
+
+  __ Allocate(instance_size, result, no_reg, scratch, deferred->entry(),
+              TAG_OBJECT);
+
+  __ bind(deferred->exit());
+  if (FLAG_debug_code) {
+    Label is_in_new_space;
+    __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
+    __ Abort("Allocated object is not in new-space");
+    __ bind(&is_in_new_space);
+  }
+
+  // Load the initial map.
+  Register map = scratch;
+  __ LoadHeapObject(scratch, constructor);
+  __ movq(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
+
+  if (FLAG_debug_code) {
+    __ AssertNotSmi(map);
+    __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
+            Immediate(instance_size >> kPointerSizeLog2));
+    __ Assert(equal, "Unexpected instance size");
+    __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
+            Immediate(initial_map->pre_allocated_property_fields()));
+    __ Assert(equal, "Unexpected pre-allocated property fields count");
+    __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
+            Immediate(initial_map->unused_property_fields()));
+    __ Assert(equal, "Unexpected unused property fields count");
+    __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
+            Immediate(initial_map->inobject_properties()));
+    __ Assert(equal, "Unexpected in-object property fields count");
+  }
+
+  // Initialize map and fields of the newly allocated object.
+  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
+  __ movq(FieldOperand(result, JSObject::kMapOffset), map);
+  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
+  __ movq(FieldOperand(result, JSObject::kElementsOffset), scratch);
+  __ movq(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
+  if (initial_map->inobject_properties() != 0) {
+    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+    for (int i = 0; i < initial_map->inobject_properties(); i++) {
+      int property_offset = JSObject::kHeaderSize + i * kPointerSize;
+      __ movq(FieldOperand(result, property_offset), scratch);
+    }
+  }
+}
+
+
+void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
+  Register result = ToRegister(instr->result());
+  Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
+  int instance_size = initial_map->instance_size();
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ Set(result, 0);
+
+  PushSafepointRegistersScope scope(this);
+  __ Push(Smi::FromInt(instance_size));
+  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+  __ StoreToSafepointRegisterSlot(result, rax);
+}
+
+
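
DoAllocateObject above is a standard deferred-code split: allocate inline on the fast path, and only on failure fall into out-of-line code that calls the runtime. A toy bump-pointer version of the same split:

    #include <cstdint>
    #include <cstdlib>

    struct Arena {
      uint8_t* top;
      uint8_t* limit;
    };

    void* SlowAllocate(size_t size) {  // stands in for the runtime call
      return std::malloc(size);
    }

    void* Allocate(Arena* arena, size_t size) {
      if (arena->top + size <= arena->limit) {  // inline fast path
        void* result = arena->top;
        arena->top += size;
        return result;
      }
      return SlowAllocate(size);                // deferred slow path
    }

    int main() {
      static uint8_t buffer[64];
      Arena arena = { buffer, buffer + sizeof(buffer) };
      void* a = Allocate(&arena, 16);   // fast
      void* b = Allocate(&arena, 256);  // overflows, takes the slow path
      std::free(b);
      return a != 0 ? 0 : 1;
    }
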
 void LCodeGen::DoAllocate(LAllocate* instr) {
   class DeferredAllocate: public LDeferredCode {
    public:
@@ -5259,15 +5312,12 @@
 
 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
   Register input = ToRegister(instr->value());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  Label* true_label = chunk_->GetAssemblyLabel(true_block);
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
   Condition final_branch_condition =
-      EmitTypeofIs(true_label, false_label, input, instr->type_literal());
+      EmitTypeofIs(instr->TrueLabel(chunk_),
+          instr->FalseLabel(chunk_), input, instr->type_literal());
   if (final_branch_condition != no_condition) {
-    EmitBranch(true_block, false_block, final_branch_condition);
+    EmitBranch(instr, final_branch_condition);
   }
 }
 
@@ -5350,11 +5400,9 @@
 
 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
   Register temp = ToRegister(instr->temp());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
 
   EmitIsConstructCall(temp);
-  EmitBranch(true_block, false_block, equal);
+  EmitBranch(instr, equal);
 }
 
 
@@ -5506,15 +5554,15 @@
   // properly registered for deoptimization and records the assembler's PC
   // offset.
   LEnvironment* environment = instr->environment();
-  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
-                                   instr->SpilledDoubleRegisterArray());
 
   // If the environment were already registered, we would have no way of
   // backpatching it with the spill slot operands.
   ASSERT(!environment->HasBeenRegistered());
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-  ASSERT(osr_pc_offset_ == -1);
-  osr_pc_offset_ = masm()->pc_offset();
+
+  // Normally we record the first unknown OSR value as the entry point to the
+  // OSR code, but if there were none, record the entry point here.
+  if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
 }
 
 
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 31dd9c4..c89ec1f 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -79,7 +79,6 @@
   Heap* heap() const { return isolate()->heap(); }
   Zone* zone() const { return zone_; }
 
-  // TODO(svenpanne) Use this consistently.
   int LookupDestination(int block_id) const {
     return chunk()->LookupDestination(block_id);
   }
@@ -128,6 +127,7 @@
   void DoDeferredRandom(LRandom* instr);
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredAllocateObject(LAllocateObject* instr);
   void DoDeferredAllocate(LAllocate* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
@@ -282,7 +282,8 @@
 
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
-  void EmitBranch(int left_block, int right_block, Condition cc);
+  template<class InstrType>
+  void EmitBranch(InstrType instr, Condition cc);
   void EmitNumberUntagD(
       Register input,
       XMMRegister result,
@@ -311,7 +312,8 @@
   // true and false label should be made, to optimize fallthrough.
   Condition EmitIsString(Register input,
                          Register temp1,
-                         Label* is_not_string);
+                         Label* is_not_string,
+                         SmiCheck check_needed);
 
   // Emits optimized code for %_IsConstructCall().
   // Caller should branch on equal condition.
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 980cb21..7a475a7 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -43,31 +43,6 @@
 LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
 #undef DEFINE_COMPILE
 
-LOsrEntry::LOsrEntry() {
-  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
-    register_spills_[i] = NULL;
-  }
-  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
-    double_register_spills_[i] = NULL;
-  }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
-                                    LOperand* spill_operand) {
-  ASSERT(spill_operand->IsStackSlot());
-  ASSERT(register_spills_[allocation_index] == NULL);
-  register_spills_[allocation_index] = spill_operand;
-}
-
-
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
-                                          LOperand* spill_operand) {
-  ASSERT(spill_operand->IsDoubleStackSlot());
-  ASSERT(double_register_spills_[allocation_index] == NULL);
-  double_register_spills_[allocation_index] = spill_operand;
-}
-
 
 #ifdef DEBUG
 void LInstruction::VerifyCall() {
@@ -354,8 +329,7 @@
   constructor()->PrintTo(stream);
   stream->Add(" #%d / ", arity());
   ASSERT(hydrogen()->property_cell()->value()->IsSmi());
-  ElementsKind kind = static_cast<ElementsKind>(
-      Smi::cast(hydrogen()->property_cell()->value())->value());
+  ElementsKind kind = hydrogen()->elements_kind();
   stream->Add(" (%s) ", ElementsKindToString(kind));
 }
 
@@ -456,7 +430,7 @@
 LPlatformChunk* LChunkBuilder::Build() {
   ASSERT(is_unused());
   chunk_ = new(zone()) LPlatformChunk(info(), graph());
-  HPhase phase("L_Building chunk", chunk_);
+  LPhase phase("L_Building chunk", chunk_);
   status_ = BUILDING;
   const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
   for (int i = 0; i < blocks->length(); i++) {
@@ -1014,10 +988,13 @@
 
   LBranch* result = new(zone()) LBranch(UseRegister(value));
   // Tagged values that are not known smis or booleans require a
-  // deoptimization environment.
+  // deoptimization environment. If the instruction is generic, no
+  // environment is needed since all cases are handled.
+  ToBooleanStub::Types expected = instr->expected_input_types();
   Representation rep = value->representation();
   HType type = value->type();
-  if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
+  if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() &&
+      !expected.IsGeneric()) {
     return AssignEnvironment(result);
   }
   return result;
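
The new condition in one line: an environment is attached only while the branch might still meet an unhandled case. As a predicate (parameter names illustrative):

    bool BranchNeedsEnvironment(bool is_tagged, bool known_smi,
                                bool known_boolean, bool generic) {
      return is_tagged && !known_smi && !known_boolean && !generic;
    }

    int main() {
      // Once the expected-input set is generic, no environment is required.
      return BranchNeedsEnvironment(true, false, false, true) ? 1 : 0;
    }
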
@@ -1287,7 +1264,6 @@
 
 
 LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
-  ASSERT(FLAG_optimize_constructed_arrays);
   LOperand* constructor = UseFixed(instr->constructor(), rdi);
   argument_count_ -= instr->argument_count();
   LCallNewArray* result = new(zone()) LCallNewArray(constructor);
@@ -1456,7 +1432,7 @@
               instr->CheckFlag(HValue::kBailoutOnMinusZero))
           ? AssignEnvironment(result)
           : result;
-    } else if (instr->has_fixed_right_arg()) {
+    } else if (instr->fixed_right_arg().has_value) {
       LModI* mod = new(zone()) LModI(UseRegister(left),
                                      UseRegisterAtStart(right),
                                      NULL);
@@ -1936,7 +1912,7 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
   return AssignEnvironment(new(zone()) LCheckNonSmi(value));
 }
@@ -2357,6 +2333,13 @@
 }
 
 
+LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
+  info()->MarkAsDeferredCalling();
+  LAllocateObject* result = new(zone()) LAllocateObject(TempRegister());
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
 LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
   info()->MarkAsDeferredCalling();
   LOperand* size = instr->size()->IsConstant()
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index cc73382..d6edd6d 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -50,6 +50,7 @@
   V(AccessArgumentsAt)                          \
   V(AddI)                                       \
   V(Allocate)                                   \
+  V(AllocateObject)                             \
   V(ApplyArguments)                             \
   V(ArgumentsElements)                          \
   V(ArgumentsLength)                            \
@@ -489,17 +490,44 @@
 template<int I, int T>
 class LControlInstruction: public LTemplateInstruction<0, I, T> {
  public:
+  LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
   virtual bool IsControl() const { return true; }
 
   int SuccessorCount() { return hydrogen()->SuccessorCount(); }
   HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
-  int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
-  int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
+
+  int TrueDestination(LChunk* chunk) {
+    return chunk->LookupDestination(true_block_id());
+  }
+  int FalseDestination(LChunk* chunk) {
+    return chunk->LookupDestination(false_block_id());
+  }
+
+  Label* TrueLabel(LChunk* chunk) {
+    if (true_label_ == NULL) {
+      true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+    }
+    return true_label_;
+  }
+  Label* FalseLabel(LChunk* chunk) {
+    if (false_label_ == NULL) {
+      false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+    }
+    return false_label_;
+  }
+
+ protected:
+  int true_block_id() { return SuccessorAt(0)->block_id(); }
+  int false_block_id() { return SuccessorAt(1)->block_id(); }
 
  private:
   HControlInstruction* hydrogen() {
     return HControlInstruction::cast(this->hydrogen_value());
   }
+
+  Label* false_label_;
+  Label* true_label_;
 };
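
TrueLabel/FalseLabel memoize the assembly-label lookup, so repeated queries from the code generator stay cheap. A minimal model of that caching:

    #include <cassert>

    struct Label {};

    struct Chunk {
      Label label;
      Label* GetAssemblyLabel(int /* block */) { return &label; }
    };

    struct ControlInstr {
      Label* true_label_ = nullptr;
      Label* TrueLabel(Chunk* chunk) {
        if (true_label_ == nullptr) {
          true_label_ = chunk->GetAssemblyLabel(0);  // first call: look up
        }
        return true_label_;  // later calls: cached pointer
      }
    };

    int main() {
      Chunk chunk;
      ControlInstr instr;
      assert(instr.TrueLabel(&chunk) == instr.TrueLabel(&chunk));
      return 0;
    }
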
 
 
@@ -1190,7 +1218,7 @@
 };
 
 
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
+class LCmpMapAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LCmpMapAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -1201,15 +1229,7 @@
   DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(CompareMap)
 
-  virtual bool IsControl() const { return true; }
-
   Handle<Map> map() const { return hydrogen()->map(); }
-  int true_block_id() const {
-    return hydrogen()->FirstSuccessor()->block_id();
-  }
-  int false_block_id() const {
-    return hydrogen()->SecondSuccessor()->block_id();
-  }
 };
 
 
@@ -2359,6 +2379,20 @@
   LOperand* value() { return inputs_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+  DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LAllocateObject: public LTemplateInstruction<1, 0, 1> {
+ public:
+  explicit LAllocateObject(LOperand* temp) {
+    temps_[0] = temp;
+  }
+
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
+  DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
 };
 
 
@@ -2463,26 +2497,10 @@
 
 class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
  public:
-  LOsrEntry();
+  LOsrEntry() {}
 
   virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
   DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
-  LOperand** SpilledRegisterArray() { return register_spills_; }
-  LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
-  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
-  void MarkSpilledDoubleRegister(int allocation_index,
-                                 LOperand* spill_operand);
-
- private:
-  // Arrays of spill slot operands for registers with an assigned spill
-  // slot, i.e., that must also be restored to the spill slot on OSR entry.
-  // NULL if the register has no assigned spill slot.  Indexed by allocation
-  // index.
-  LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
-  LOperand* double_register_spills_[
-      DoubleRegister::kMaxNumAllocatableRegisters];
 };
 
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index b6a1d34..a4d64b9 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -2308,6 +2308,32 @@
 }
 
 
+template<class T>
+static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
+                                      T operand_or_register,
+                                      Label* not_unique_name,
+                                      Label::Distance distance) {
+  STATIC_ASSERT(((SYMBOL_TYPE - 1) & kIsInternalizedMask) == kInternalizedTag);
+  masm->cmpb(operand_or_register, Immediate(kInternalizedTag));
+  masm->j(less, not_unique_name, distance);
+  masm->cmpb(operand_or_register, Immediate(SYMBOL_TYPE));
+  masm->j(greater, not_unique_name, distance);
+}
+
+
+void MacroAssembler::JumpIfNotUniqueName(Operand operand,
+                                         Label* not_unique_name,
+                                         Label::Distance distance) {
+  JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
+}
+
+
+void MacroAssembler::JumpIfNotUniqueName(Register reg,
+                                         Label* not_unique_name,
+                                         Label::Distance distance) {
+  JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
+}
+
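
JumpIfNotUniqueNameHelper encodes a closed range check: values below kInternalizedTag or above SYMBOL_TYPE are rejected, leaving internalized strings and symbols. The same predicate in plain C++ (tag values invented; only their ordering matters):

    #include <cassert>
    #include <cstdint>

    const uint8_t kInternalizedTag = 4;
    const uint8_t SYMBOL_TYPE = 5;

    bool IsUniqueName(uint8_t instance_type) {
      if (instance_type < kInternalizedTag) return false;  // j(less, ...)
      if (instance_type > SYMBOL_TYPE) return false;       // j(greater, ...)
      return true;
    }

    int main() {
      assert(IsUniqueName(kInternalizedTag));
      assert(IsUniqueName(SYMBOL_TYPE));
      assert(!IsUniqueName(SYMBOL_TYPE + 1));
      return 0;
    }
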
 
 void MacroAssembler::Move(Register dst, Register src) {
   if (!dst.is(src)) {
@@ -2661,7 +2687,8 @@
   // rax = exception, rdi = code object, rdx = state.
   movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
   shr(rdx, Immediate(StackHandler::kKindWidth));
-  movq(rdx, FieldOperand(rbx, rdx, times_8, FixedArray::kHeaderSize));
+  movq(rdx,
+       FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
   SmiToInteger64(rdx, rdx);
   lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
   jmp(rdi);
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 1b7e586..14228cc 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -758,6 +758,12 @@
       Label* on_fail,
       Label::Distance near_jump = Label::kFar);
 
+  // Jump to not_unique_name if the value in the given register or operand
+  // is not a unique name (an internalized string or a symbol).
+  void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
+                           Label::Distance distance = Label::kFar);
+  void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
+                           Label::Distance distance = Label::kFar);
+
   // ---------------------------------------------------------------------------
   // Macro instructions.
 
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 0217fad..9dddd79 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -53,7 +53,7 @@
   ASSERT(kPointerSizeLog2 == kHeapObjectTagSize + 1);
   ScaleFactor scale_factor = times_2;
 
-  ASSERT_EQ(24, sizeof(StubCache::Entry));
+  ASSERT_EQ(3 * kPointerSize, sizeof(StubCache::Entry));
   // The offset register holds the entry offset times four (due to masking
   // and shifting optimizations).
   ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
@@ -171,8 +171,8 @@
   USE(extra2);  // The register extra2 is not used on the X64 platform.
   USE(extra3);  // The register extra3 is not used on the X64 platform.
   // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 24.
-  ASSERT(sizeof(Entry) == 24);
+  // entry size being 3 * kPointerSize.
+  ASSERT(sizeof(Entry) == 3 * kPointerSize);
 
   // Make sure the flags do not name a specific type.
   ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
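
Replacing the literal 24 with 3 * kPointerSize states the invariant directly: a stub cache entry is three pointer-sized fields, whatever the pointer width. A sketch (field names assumed):

    #include <cstddef>

    const size_t kPointerSize = sizeof(void*);

    struct Entry {
      void* key;
      void* value;
      void* map;
    };

    int main() {
      static_assert(sizeof(Entry) == 3 * kPointerSize,
                    "entry is three pointers");
      return 0;
    }
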
@@ -1612,12 +1612,59 @@
 }
 
 
+Handle<Code> CallStubCompiler::CompileArrayCodeCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<Cell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name,
+    Code::StubType type) {
+  Label miss;
+
+  // Check that the function is still the Array function.
+  const int argc = arguments().immediate();
+  GenerateNameCheck(name, &miss);
+
+  if (cell.is_null()) {
+    // Get the receiver from the stack.
+    __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+    // Check that the receiver isn't a smi.
+    __ JumpIfSmi(rdx, &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+                    name, &miss);
+  } else {
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
+    GenerateLoadFunctionFromCell(cell, function, &miss);
+  }
+
+  Handle<Smi> kind(Smi::FromInt(GetInitialFastElementsKind()), isolate());
+  Handle<Cell> kind_feedback_cell =
+      isolate()->factory()->NewCell(kind);
+  __ movq(rax, Immediate(argc));
+  __ Move(rbx, kind_feedback_cell);
+  __ Move(rdi, function);
+
+  ArrayConstructorStub stub(isolate());
+  __ TailCallStub(&stub);
+
+  __ bind(&miss);
+  GenerateMissBranch();
+
+  // Return the generated code.
+  return GetCode(type, name);
+}
+
+
 Handle<Code> CallStubCompiler::CompileArrayPushCall(
     Handle<Object> object,
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- rcx                 : name
   //  -- rsp[0]              : return address
@@ -1858,7 +1905,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return GetCode(function);
+  return GetCode(type, name);
 }
 
 
@@ -1867,7 +1914,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- rcx                 : name
   //  -- rsp[0]              : return address
@@ -1939,7 +1987,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return GetCode(function);
+  return GetCode(type, name);
 }
 
 
@@ -1948,7 +1996,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- rcx                 : function name
   //  -- rsp[0]              : return address
@@ -2019,7 +2068,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return GetCode(function);
+  return GetCode(type, name);
 }
 
 
@@ -2028,7 +2077,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- rcx                 : function name
   //  -- rsp[0]              : return address
@@ -2099,7 +2149,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return GetCode(function);
+  return GetCode(type, name);
 }
 
 
@@ -2108,7 +2158,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- rcx                 : function name
   //  -- rsp[0]              : return address
@@ -2170,7 +2221,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+  return GetCode(type, name);
 }
 
 
@@ -2179,7 +2230,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // TODO(872): implement this.
   return Handle<Code>::null();
 }
@@ -2190,7 +2242,8 @@
     Handle<JSObject> holder,
     Handle<Cell> cell,
     Handle<JSFunction> function,
-    Handle<String> name) {
+    Handle<String> name,
+    Code::StubType type) {
   // ----------- S t a t e -------------
   //  -- rcx                 : function name
   //  -- rsp[0]              : return address
@@ -2286,7 +2339,7 @@
   GenerateMissBranch();
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
+  return GetCode(type, name);
 }
 
 
@@ -2477,7 +2530,8 @@
   if (HasCustomCallGenerator(function)) {
     Handle<Code> code = CompileCustomCall(object, holder,
                                           Handle<PropertyCell>::null(),
-                                          function, Handle<String>::cast(name));
+                                          function, Handle<String>::cast(name),
+                                          Code::CONSTANT_FUNCTION);
     // A null handle means bail out to the regular compiler code below.
     if (!code.is_null()) return code;
   }
@@ -2571,7 +2625,8 @@
 
   if (HasCustomCallGenerator(function)) {
     Handle<Code> code = CompileCustomCall(
-        object, holder, cell, function, Handle<String>::cast(name));
+        object, holder, cell, function, Handle<String>::cast(name),
+        Code::NORMAL);
     // A null handle means bail out to the regular compiler code below.
     if (!code.is_null()) return code;
   }
diff --git a/src/zone-inl.h b/src/zone-inl.h
index e312b20..49e7626 100644
--- a/src/zone-inl.h
+++ b/src/zone-inl.h
@@ -40,7 +40,6 @@
 
 
 inline void* Zone::New(int size) {
-  ASSERT(scope_nesting_ > 0);
   // Round up the requested size to fit the alignment.
   size = RoundUp(size, kAlignment);
 
@@ -75,7 +74,7 @@
 
 
 bool Zone::excess_allocation() {
-  return segment_bytes_allocated_ > zone_excess_limit_;
+  return segment_bytes_allocated_ > kExcessLimit;
 }
 
 
@@ -110,17 +109,6 @@
 }
 
 
-ZoneScope::ZoneScope(Zone* zone, ZoneScopeMode mode)
-    : zone_(zone), mode_(mode) {
-  zone_->scope_nesting_++;
-}
-
-
-bool ZoneScope::ShouldDeleteOnExit() {
-  return zone_->scope_nesting_ == 1 && mode_ == DELETE_ON_EXIT;
-}
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_ZONE_INL_H_
diff --git a/src/zone.cc b/src/zone.cc
index 51b8113..82a2efa 100644
--- a/src/zone.cc
+++ b/src/zone.cc
@@ -68,19 +68,41 @@
 
 
 Zone::Zone(Isolate* isolate)
-    : zone_excess_limit_(256 * MB),
+    : allocation_size_(0),
       segment_bytes_allocated_(0),
       position_(0),
       limit_(0),
-      scope_nesting_(0),
       segment_head_(NULL),
       isolate_(isolate) {
 }
-unsigned Zone::allocation_size_ = 0;
 
-ZoneScope::~ZoneScope() {
-  if (ShouldDeleteOnExit()) zone_->DeleteAll();
-  zone_->scope_nesting_--;
+
+Zone::~Zone() {
+#ifdef DEBUG
+  // Constant byte value used for zapping dead memory in debug mode.
+  static const unsigned char kZapDeadByte = 0xcd;
+#endif
+
+  // Traverse the chained list of segments, zapping (in debug mode)
+  // and freeing every segment.
+  Segment* current = segment_head_;
+  while (current != NULL) {
+    Segment* next = current->next();
+    int size = current->size();
+#ifdef DEBUG
+    // Zap the entire current segment (including the header).
+    memset(current, kZapDeadByte, size);
+#endif
+    DeleteSegment(current, size);
+    current = next;
+  }
+
+  // We must clear the position and limit to force
+  // a new segment to be allocated on demand.
+  position_ = limit_ = 0;
+
+  // Update the head segment.
+  segment_head_ = NULL;
 }
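
With ZoneScope gone, the Zone destructor itself walks the segment chain and frees everything; no small segment is kept for reuse anymore. A toy model of that teardown:

    #include <cstdlib>

    struct Segment {
      Segment* next;
    };

    struct Zone {
      Segment* segment_head = nullptr;

      void* NewSegment() {
        Segment* s = static_cast<Segment*>(std::malloc(sizeof(Segment)));
        s->next = segment_head;
        segment_head = s;
        return s;
      }

      ~Zone() {
        Segment* current = segment_head;
        while (current != nullptr) {  // walk and free the whole chain
          Segment* next = current->next;
          std::free(current);
          current = next;
        }
        segment_head = nullptr;
      }
    };

    int main() {
      Zone zone;
      zone.NewSegment();
      zone.NewSegment();
      return 0;  // ~Zone frees both segments
    }
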
 
 
@@ -104,66 +126,6 @@
 }
 
 
-void Zone::DeleteAll() {
-#ifdef DEBUG
-  // Constant byte value used for zapping dead memory in debug mode.
-  static const unsigned char kZapDeadByte = 0xcd;
-#endif
-
-  // Find a segment with a suitable size to keep around.
-  Segment* keep = segment_head_;
-  while (keep != NULL && keep->size() > kMaximumKeptSegmentSize) {
-    keep = keep->next();
-  }
-
-  // Traverse the chained list of segments, zapping (in debug mode)
-  // and freeing every segment except the one we wish to keep.
-  Segment* current = segment_head_;
-  while (current != NULL) {
-    Segment* next = current->next();
-    if (current == keep) {
-      // Unlink the segment we wish to keep from the list.
-      current->clear_next();
-    } else {
-      int size = current->size();
-#ifdef DEBUG
-      // Zap the entire current segment (including the header).
-      memset(current, kZapDeadByte, size);
-#endif
-      DeleteSegment(current, size);
-    }
-    current = next;
-  }
-
-  // If we have found a segment we want to keep, we must recompute the
-  // variables 'position' and 'limit' to prepare for future allocate
-  // attempts. Otherwise, we must clear the position and limit to
-  // force a new segment to be allocated on demand.
-  if (keep != NULL) {
-    Address start = keep->start();
-    position_ = RoundUp(start, kAlignment);
-    limit_ = keep->end();
-#ifdef DEBUG
-    // Zap the contents of the kept segment (but not the header).
-    memset(start, kZapDeadByte, keep->capacity());
-#endif
-  } else {
-    position_ = limit_ = 0;
-  }
-
-  // Update the head segment to be the kept segment (if any).
-  segment_head_ = keep;
-}
-
-
-void Zone::DeleteKeptSegment() {
-  if (segment_head_ != NULL) {
-    DeleteSegment(segment_head_, segment_head_->size());
-    segment_head_ = NULL;
-  }
-}
-
-
 Address Zone::NewExpand(int size) {
   // Make sure the requested size is already properly aligned and that
   // there isn't enough room in the Zone to satisfy the request.
diff --git a/src/zone.h b/src/zone.h
index 01e887e..5545e3c 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -39,13 +39,6 @@
 namespace internal {
 
 
-// Zone scopes are in one of two modes.  Either they delete the zone
-// on exit or they do not.
-enum ZoneScopeMode {
-  DELETE_ON_EXIT,
-  DONT_DELETE_ON_EXIT
-};
-
 class Segment;
 class Isolate;
 
@@ -65,7 +58,7 @@
 class Zone {
  public:
   explicit Zone(Isolate* isolate);
-  ~Zone() { DeleteKeptSegment(); }
+  ~Zone();
   // Allocate 'size' bytes of memory in the Zone; expands the Zone by
   // allocating new segments of memory on demand using malloc().
   inline void* New(int size);
@@ -73,26 +66,18 @@
   template <typename T>
   inline T* NewArray(int length);
 
-  // Deletes all objects and free all memory allocated in the Zone. Keeps one
-  // small (size <= kMaximumKeptSegmentSize) segment around if it finds one.
-  void DeleteAll();
-
-  // Deletes the last small segment kept around by DeleteAll().
-  void DeleteKeptSegment();
-
   // Returns true if more memory has been allocated in zones than
   // the limit allows.
   inline bool excess_allocation();
 
   inline void adjust_segment_bytes_allocated(int delta);
 
-  inline Isolate* isolate() { return isolate_; }
+  inline unsigned allocation_size() { return allocation_size_; }
 
-  static unsigned allocation_size_;
+  inline Isolate* isolate() { return isolate_; }
 
  private:
   friend class Isolate;
-  friend class ZoneScope;
 
   // All pointers returned from New() have this alignment.  In addition, if the
   // object being allocated has a size that is divisible by 8 then its alignment
@@ -105,11 +90,11 @@
   // Never allocate segments larger than this size in bytes.
   static const int kMaximumSegmentSize = 1 * MB;
 
-  // Never keep segments larger than this size in bytes around.
-  static const int kMaximumKeptSegmentSize = 64 * KB;
-
   // Report zone excess when allocation exceeds this limit.
-  int zone_excess_limit_;
+  static const int kExcessLimit = 256 * MB;
+
+  // The number of bytes allocated in this zone so far.
+  unsigned allocation_size_;
 
   // The number of bytes allocated in segments.  Note that this number
   // includes memory allocated from the OS but not yet allocated from
@@ -135,8 +120,6 @@
   Address position_;
   Address limit_;
 
-  int scope_nesting_;
-
   Segment* segment_head_;
   Isolate* isolate_;
 };
@@ -229,31 +212,6 @@
 };
 
 
-// ZoneScopes keep track of the current parsing and compilation
-// nesting and cleans up generated ASTs in the Zone when exiting the
-// outer-most scope.
-class ZoneScope BASE_EMBEDDED {
- public:
-  INLINE(ZoneScope(Zone* zone, ZoneScopeMode mode));
-
-  virtual ~ZoneScope();
-
-  inline bool ShouldDeleteOnExit();
-
-  // For ZoneScopes that do not delete on exit by default, call this
-  // method to request deletion on exit.
-  void DeleteOnExit() {
-    mode_ = DELETE_ON_EXIT;
-  }
-
-  inline static int nesting();
-
- private:
-  Zone* zone_;
-  ZoneScopeMode mode_;
-};
-
-
 // A zone splay tree.  The config type parameter encapsulates the
 // different configurations of a concrete splay tree (see splay-tree.h).
 // The tree itself and all its elements are allocated in the Zone.